/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
#include "uverbs.h"

/*
 * Common signature for the per-resource-type fill callbacks
 * (fill_res_qp_entry() and friends below): emit one restrack entry
 * into an skb, optionally filtered by port.
 */
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
			       struct rdma_restrack_entry*, uint32_t);

/*
 * Sort array elements by the netlink attribute name
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
	[RDMA_NLDEV_ATTR_DEV_DIM]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IB_DEVICE_NAME_MAX },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTX]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CTX_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RAW]		= { .type = NLA_BINARY },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SRQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SRQN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_MIN_RANGE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_MAX_RANGE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
};

/*
 * Emit the driver attribute's name and, when it is not
 * RDMA_NLDEV_PRINT_TYPE_UNSPEC, the requested print type (hex vs. default).
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

/* Emit a named driver-specific u32 attribute with the given print type. */
static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

/* Emit a named driver-specific u64 attribute with the given print type. */
static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

/* Driver-facing helper: emit a name/string pair into the driver nest. */
int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
			      const char *str)
{
	if (put_driver_name_print_type(msg, name,
				       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
		return -EMSGSIZE;

	return 0;
}
EXPORT_SYMBOL(rdma_nl_put_driver_string);

/* Driver-facing helper: emit a name/u32 pair (default print type). */
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

/* Driver-facing helper: emit a name/u32 pair marked for hex display. */
int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

/* Driver-facing helper: emit a name/u64 pair (default print type). */
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

/* Driver-facing helper: emit a name/u64 pair marked for hex display. */
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

/*
 * Emit the pair of attributes that identifies a device in every nldev
 * message: its index and its name.
 */
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

/*
 * Fill the device-level attributes: handle, port count, capability flags,
 * FW version, GUIDs, node type, DIM setting and protocol string.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u32 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* PORT_INDEX at device level carries the number of ports. */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * Link type is determined on first port and mlx4 device
	 * which can potentially have two different link type for the same
	 * IB device is considered as better to be avoided in the future,
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}

/*
 * Fill the per-port attributes.  IB-specific attributes (cap flags,
 * subnet prefix, LIDs, LMC) are only emitted for IB ports; the attached
 * netdev is only reported when it belongs to the requesting net namespace.
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap flag words are packed into one u64 attr. */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

/* Emit one name/current-count pair inside the resource summary nest. */
static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start_noflag(msg,
					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

/*
 * Fill the resource summary for a device: one entry per tracked
 * restrack resource type, with its current count.
 */
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
		[RDMA_RESTRACK_SRQ] = "srq",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i);
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

/*
 * Report who owns a resource: kernel resources get their kernel name,
 * user resources get the owning task's PID.
 */
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	int err = 0;

	/*
	 * For user resources, users should read /proc/PID/comm to get the
	 * name of the task file.
	 */
	if (rdma_is_kernel_res(res)) {
		err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				     res->kern_name);
	} else {
		pid_t pid;

		pid = task_pid_vnr(res->task);
		/*
		 * Task is dead and in zombie state.
		 * There is no need to print PID anymore.
		 */
		if (pid)
			/*
			 * This part is racy, task can be killed and PID will
			 * be zero right here but it is ok, next query won't
			 * return PID. We don't promise real-time reflection
			 * of SW objects.
			 */
			err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
	}

	return err ? -EMSGSIZE : 0;
}

/*
 * Query the QP and emit its state attributes (remote QPN, PSNs, path
 * migration state, type, state), then let the driver append its own
 * entries via the fill_res_qp_entry device op when provided.
 */
static int fill_res_qp_entry_query(struct sk_buff *msg,
				   struct rdma_restrack_entry *res,
				   struct ib_device *dev,
				   struct ib_qp *qp)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (dev->ops.fill_res_qp_entry)
		return dev->ops.fill_res_qp_entry(msg, qp);
	return 0;

err:	return -EMSGSIZE;
}

/*
 * res_fill_func_t callback for QPs: emit identity attributes (port,
 * LQPN, PDN, owner) and then the queried state via
 * fill_res_qp_entry_query().  -EAGAIN skips entries on other ports.
 */
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	int ret;

	if (port && port != qp->port)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
		return -EINVAL;

	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
	if (ret)
		return -EMSGSIZE;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		return -EMSGSIZE;

	ret = fill_res_name_pid(msg, res);
	if (ret)
		return -EMSGSIZE;

	return fill_res_qp_entry_query(msg, res, dev, qp);
}

/*
 * res_fill_func_t callback for raw QP dumps: delegate entirely to the
 * driver's fill_res_qp_entry_raw op; -EINVAL when the driver has none.
 */
static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
				 struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;

	if (port && port != qp->port)
		return -EAGAIN;
	if (!dev->ops.fill_res_qp_entry_raw)
		return -EINVAL;
	return dev->ops.fill_res_qp_entry_raw(msg, qp);
}

/*
 * res_fill_func_t callback for CM IDs: emit port, QP binding, port
 * space, state, source/destination addresses, CM ID number and owner,
 * then let the driver append via fill_res_cm_id_entry when provided.
 */
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return -EAGAIN;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (dev->ops.fill_res_cm_id_entry)
		return dev->ops.fill_res_cm_id_entry(msg, cm_id);
	return 0;

err:	return -EMSGSIZE;
}

/* res_fill_func_t callback for CQs (continues beyond this chunk). */
static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		return
-EMSGSIZE; 614a34fc089SSteve Wise 615f8fc8cd9SYamin Friedman if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) 61665959522SMaor Gottlieb return -EMSGSIZE; 617f8fc8cd9SYamin Friedman 618517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) 61965959522SMaor Gottlieb return -EMSGSIZE; 620c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 621c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 6225bd48c18SJason Gunthorpe cq->uobject->uevent.uobject.context->res.id)) 62365959522SMaor Gottlieb return -EMSGSIZE; 624517b773eSLeon Romanovsky 625a34fc089SSteve Wise if (fill_res_name_pid(msg, res)) 62665959522SMaor Gottlieb return -EMSGSIZE; 627a34fc089SSteve Wise 62865959522SMaor Gottlieb return (dev->ops.fill_res_cq_entry) ? 62965959522SMaor Gottlieb dev->ops.fill_res_cq_entry(msg, cq) : 0; 63065959522SMaor Gottlieb } 631a34fc089SSteve Wise 63265959522SMaor Gottlieb static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 63365959522SMaor Gottlieb struct rdma_restrack_entry *res, uint32_t port) 63465959522SMaor Gottlieb { 63565959522SMaor Gottlieb struct ib_cq *cq = container_of(res, struct ib_cq, res); 63665959522SMaor Gottlieb struct ib_device *dev = cq->device; 63765959522SMaor Gottlieb 63865959522SMaor Gottlieb if (!dev->ops.fill_res_cq_entry_raw) 63965959522SMaor Gottlieb return -EINVAL; 64065959522SMaor Gottlieb return dev->ops.fill_res_cq_entry_raw(msg, cq); 641a34fc089SSteve Wise } 642a34fc089SSteve Wise 643659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 644fccec5b8SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 645fccec5b8SSteve Wise { 646fccec5b8SSteve Wise struct ib_mr *mr = container_of(res, struct ib_mr, res); 64702da3750SLeon Romanovsky struct ib_device *dev = mr->pd->device; 648fccec5b8SSteve Wise 649659067b0SLeon Romanovsky if (has_cap_net_admin) { 650fccec5b8SSteve Wise if (nla_put_u32(msg, 
RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) 65165959522SMaor Gottlieb return -EMSGSIZE; 652fccec5b8SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) 65365959522SMaor Gottlieb return -EMSGSIZE; 654fccec5b8SSteve Wise } 655fccec5b8SSteve Wise 65625a0ad85SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 65725a0ad85SSteve Wise RDMA_NLDEV_ATTR_PAD)) 65865959522SMaor Gottlieb return -EMSGSIZE; 659fccec5b8SSteve Wise 660517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 66165959522SMaor Gottlieb return -EMSGSIZE; 662517b773eSLeon Romanovsky 663c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 664c3d02788SLeon Romanovsky nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) 66565959522SMaor Gottlieb return -EMSGSIZE; 666c3d02788SLeon Romanovsky 667fccec5b8SSteve Wise if (fill_res_name_pid(msg, res)) 66865959522SMaor Gottlieb return -EMSGSIZE; 669fccec5b8SSteve Wise 67065959522SMaor Gottlieb return (dev->ops.fill_res_mr_entry) ? 
67165959522SMaor Gottlieb dev->ops.fill_res_mr_entry(msg, mr) : 67265959522SMaor Gottlieb 0; 67365959522SMaor Gottlieb } 674fccec5b8SSteve Wise 67565959522SMaor Gottlieb static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 67665959522SMaor Gottlieb struct rdma_restrack_entry *res, uint32_t port) 67765959522SMaor Gottlieb { 67865959522SMaor Gottlieb struct ib_mr *mr = container_of(res, struct ib_mr, res); 67965959522SMaor Gottlieb struct ib_device *dev = mr->pd->device; 68065959522SMaor Gottlieb 68165959522SMaor Gottlieb if (!dev->ops.fill_res_mr_entry_raw) 68265959522SMaor Gottlieb return -EINVAL; 68365959522SMaor Gottlieb return dev->ops.fill_res_mr_entry_raw(msg, mr); 684fccec5b8SSteve Wise } 685fccec5b8SSteve Wise 686659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, 68729cf1351SSteve Wise struct rdma_restrack_entry *res, uint32_t port) 68829cf1351SSteve Wise { 68929cf1351SSteve Wise struct ib_pd *pd = container_of(res, struct ib_pd, res); 69029cf1351SSteve Wise 691659067b0SLeon Romanovsky if (has_cap_net_admin) { 69229cf1351SSteve Wise if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, 69329cf1351SSteve Wise pd->local_dma_lkey)) 69429cf1351SSteve Wise goto err; 69529cf1351SSteve Wise if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 69629cf1351SSteve Wise nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 69729cf1351SSteve Wise pd->unsafe_global_rkey)) 69829cf1351SSteve Wise goto err; 69929cf1351SSteve Wise } 70029cf1351SSteve Wise if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 70125a0ad85SSteve Wise atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 70229cf1351SSteve Wise goto err; 70329cf1351SSteve Wise 704517b773eSLeon Romanovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) 705517b773eSLeon Romanovsky goto err; 706517b773eSLeon Romanovsky 707c3d02788SLeon Romanovsky if (!rdma_is_kernel_res(res) && 708c3d02788SLeon Romanovsky nla_put_u32(msg, 
RDMA_NLDEV_ATTR_RES_CTXN, 709c3d02788SLeon Romanovsky pd->uobject->context->res.id)) 710c3d02788SLeon Romanovsky goto err; 711c3d02788SLeon Romanovsky 71224fd6d6fSMaor Gottlieb return fill_res_name_pid(msg, res); 71329cf1351SSteve Wise 714c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE; 71529cf1351SSteve Wise } 71629cf1351SSteve Wise 71712ce208fSNeta Ostrovsky static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, 71812ce208fSNeta Ostrovsky struct rdma_restrack_entry *res, uint32_t port) 71912ce208fSNeta Ostrovsky { 72012ce208fSNeta Ostrovsky struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); 72112ce208fSNeta Ostrovsky 72212ce208fSNeta Ostrovsky if (rdma_is_kernel_res(res)) 72312ce208fSNeta Ostrovsky return 0; 72412ce208fSNeta Ostrovsky 72512ce208fSNeta Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) 72612ce208fSNeta Ostrovsky return -EMSGSIZE; 72712ce208fSNeta Ostrovsky 72812ce208fSNeta Ostrovsky return fill_res_name_pid(msg, res); 72912ce208fSNeta Ostrovsky } 73012ce208fSNeta Ostrovsky 731c6c11ad3SNeta Ostrovsky static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, 732c6c11ad3SNeta Ostrovsky uint32_t max_range) 733c6c11ad3SNeta Ostrovsky { 734c6c11ad3SNeta Ostrovsky struct nlattr *entry_attr; 735c6c11ad3SNeta Ostrovsky 736c6c11ad3SNeta Ostrovsky if (!min_range) 737c6c11ad3SNeta Ostrovsky return 0; 738c6c11ad3SNeta Ostrovsky 739c6c11ad3SNeta Ostrovsky entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); 740c6c11ad3SNeta Ostrovsky if (!entry_attr) 741c6c11ad3SNeta Ostrovsky return -EMSGSIZE; 742c6c11ad3SNeta Ostrovsky 743c6c11ad3SNeta Ostrovsky if (min_range == max_range) { 744c6c11ad3SNeta Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) 745c6c11ad3SNeta Ostrovsky goto err; 746c6c11ad3SNeta Ostrovsky } else { 747c6c11ad3SNeta Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) 748c6c11ad3SNeta Ostrovsky goto err; 749c6c11ad3SNeta 
Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) 750c6c11ad3SNeta Ostrovsky goto err; 751c6c11ad3SNeta Ostrovsky } 752c6c11ad3SNeta Ostrovsky nla_nest_end(msg, entry_attr); 753c6c11ad3SNeta Ostrovsky return 0; 754c6c11ad3SNeta Ostrovsky 755c6c11ad3SNeta Ostrovsky err: 756c6c11ad3SNeta Ostrovsky nla_nest_cancel(msg, entry_attr); 757c6c11ad3SNeta Ostrovsky return -EMSGSIZE; 758c6c11ad3SNeta Ostrovsky } 759c6c11ad3SNeta Ostrovsky 760c6c11ad3SNeta Ostrovsky static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) 761c6c11ad3SNeta Ostrovsky { 762c6c11ad3SNeta Ostrovsky uint32_t min_range = 0, prev = 0; 763c6c11ad3SNeta Ostrovsky struct rdma_restrack_entry *res; 764c6c11ad3SNeta Ostrovsky struct rdma_restrack_root *rt; 765c6c11ad3SNeta Ostrovsky struct nlattr *table_attr; 766c6c11ad3SNeta Ostrovsky struct ib_qp *qp = NULL; 767c6c11ad3SNeta Ostrovsky unsigned long id = 0; 768c6c11ad3SNeta Ostrovsky 769c6c11ad3SNeta Ostrovsky table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); 770c6c11ad3SNeta Ostrovsky if (!table_attr) 771c6c11ad3SNeta Ostrovsky return -EMSGSIZE; 772c6c11ad3SNeta Ostrovsky 773c6c11ad3SNeta Ostrovsky rt = &srq->device->res[RDMA_RESTRACK_QP]; 774c6c11ad3SNeta Ostrovsky xa_lock(&rt->xa); 775c6c11ad3SNeta Ostrovsky xa_for_each(&rt->xa, id, res) { 776c6c11ad3SNeta Ostrovsky if (!rdma_restrack_get(res)) 777c6c11ad3SNeta Ostrovsky continue; 778c6c11ad3SNeta Ostrovsky 779c6c11ad3SNeta Ostrovsky qp = container_of(res, struct ib_qp, res); 780c6c11ad3SNeta Ostrovsky if (!qp->srq || (qp->srq->res.id != srq->res.id)) { 781c6c11ad3SNeta Ostrovsky rdma_restrack_put(res); 782c6c11ad3SNeta Ostrovsky continue; 783c6c11ad3SNeta Ostrovsky } 784c6c11ad3SNeta Ostrovsky 785c6c11ad3SNeta Ostrovsky if (qp->qp_num < prev) 786c6c11ad3SNeta Ostrovsky /* qp_num should be ascending */ 787c6c11ad3SNeta Ostrovsky goto err_loop; 788c6c11ad3SNeta Ostrovsky 789c6c11ad3SNeta Ostrovsky if (min_range == 0) { 790c6c11ad3SNeta Ostrovsky min_range = 
qp->qp_num; 791c6c11ad3SNeta Ostrovsky } else if (qp->qp_num > (prev + 1)) { 792c6c11ad3SNeta Ostrovsky if (fill_res_range_qp_entry(msg, min_range, prev)) 793c6c11ad3SNeta Ostrovsky goto err_loop; 794c6c11ad3SNeta Ostrovsky 795c6c11ad3SNeta Ostrovsky min_range = qp->qp_num; 796c6c11ad3SNeta Ostrovsky } 797c6c11ad3SNeta Ostrovsky prev = qp->qp_num; 798c6c11ad3SNeta Ostrovsky rdma_restrack_put(res); 799c6c11ad3SNeta Ostrovsky } 800c6c11ad3SNeta Ostrovsky 801c6c11ad3SNeta Ostrovsky xa_unlock(&rt->xa); 802c6c11ad3SNeta Ostrovsky 803c6c11ad3SNeta Ostrovsky if (fill_res_range_qp_entry(msg, min_range, prev)) 804c6c11ad3SNeta Ostrovsky goto err; 805c6c11ad3SNeta Ostrovsky 806c6c11ad3SNeta Ostrovsky nla_nest_end(msg, table_attr); 807c6c11ad3SNeta Ostrovsky return 0; 808c6c11ad3SNeta Ostrovsky 809c6c11ad3SNeta Ostrovsky err_loop: 810c6c11ad3SNeta Ostrovsky rdma_restrack_put(res); 811c6c11ad3SNeta Ostrovsky xa_unlock(&rt->xa); 812c6c11ad3SNeta Ostrovsky err: 813c6c11ad3SNeta Ostrovsky nla_nest_cancel(msg, table_attr); 814c6c11ad3SNeta Ostrovsky return -EMSGSIZE; 815c6c11ad3SNeta Ostrovsky } 816c6c11ad3SNeta Ostrovsky 817391c6bd5SNeta Ostrovsky static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, 818391c6bd5SNeta Ostrovsky struct rdma_restrack_entry *res, uint32_t port) 819391c6bd5SNeta Ostrovsky { 820391c6bd5SNeta Ostrovsky struct ib_srq *srq = container_of(res, struct ib_srq, res); 821391c6bd5SNeta Ostrovsky 822391c6bd5SNeta Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) 823391c6bd5SNeta Ostrovsky goto err; 824391c6bd5SNeta Ostrovsky 825391c6bd5SNeta Ostrovsky if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) 826391c6bd5SNeta Ostrovsky goto err; 827391c6bd5SNeta Ostrovsky 828391c6bd5SNeta Ostrovsky if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) 829391c6bd5SNeta Ostrovsky goto err; 830391c6bd5SNeta Ostrovsky 831391c6bd5SNeta Ostrovsky if (ib_srq_has_cq(srq->srq_type)) { 832391c6bd5SNeta Ostrovsky if 
(nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, 833391c6bd5SNeta Ostrovsky srq->ext.cq->res.id)) 834391c6bd5SNeta Ostrovsky goto err; 835391c6bd5SNeta Ostrovsky } 836391c6bd5SNeta Ostrovsky 837c6c11ad3SNeta Ostrovsky if (fill_res_srq_qps(msg, srq)) 838c6c11ad3SNeta Ostrovsky goto err; 839c6c11ad3SNeta Ostrovsky 840391c6bd5SNeta Ostrovsky return fill_res_name_pid(msg, res); 841391c6bd5SNeta Ostrovsky 842391c6bd5SNeta Ostrovsky err: 843391c6bd5SNeta Ostrovsky return -EMSGSIZE; 844391c6bd5SNeta Ostrovsky } 845391c6bd5SNeta Ostrovsky 846c4ffee7cSMark Zhang static int fill_stat_counter_mode(struct sk_buff *msg, 847c4ffee7cSMark Zhang struct rdma_counter *counter) 848c4ffee7cSMark Zhang { 849c4ffee7cSMark Zhang struct rdma_counter_mode *m = &counter->mode; 850c4ffee7cSMark Zhang 851c4ffee7cSMark Zhang if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) 852c4ffee7cSMark Zhang return -EMSGSIZE; 853c4ffee7cSMark Zhang 8547c97f3adSMark Zhang if (m->mode == RDMA_COUNTER_MODE_AUTO) { 855c4ffee7cSMark Zhang if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && 856c4ffee7cSMark Zhang nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) 857c4ffee7cSMark Zhang return -EMSGSIZE; 858c4ffee7cSMark Zhang 8597c97f3adSMark Zhang if ((m->mask & RDMA_COUNTER_MASK_PID) && 8607c97f3adSMark Zhang fill_res_name_pid(msg, &counter->res)) 8617c97f3adSMark Zhang return -EMSGSIZE; 8627c97f3adSMark Zhang } 8637c97f3adSMark Zhang 864c4ffee7cSMark Zhang return 0; 865c4ffee7cSMark Zhang } 866c4ffee7cSMark Zhang 867c4ffee7cSMark Zhang static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) 868c4ffee7cSMark Zhang { 869c4ffee7cSMark Zhang struct nlattr *entry_attr; 870c4ffee7cSMark Zhang 871c4ffee7cSMark Zhang entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); 872c4ffee7cSMark Zhang if (!entry_attr) 873c4ffee7cSMark Zhang return -EMSGSIZE; 874c4ffee7cSMark Zhang 875c4ffee7cSMark Zhang if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) 876c4ffee7cSMark Zhang goto err; 
877c4ffee7cSMark Zhang 878c4ffee7cSMark Zhang nla_nest_end(msg, entry_attr); 879c4ffee7cSMark Zhang return 0; 880c4ffee7cSMark Zhang 881c4ffee7cSMark Zhang err: 882c4ffee7cSMark Zhang nla_nest_cancel(msg, entry_attr); 883c4ffee7cSMark Zhang return -EMSGSIZE; 884c4ffee7cSMark Zhang } 885c4ffee7cSMark Zhang 886c4ffee7cSMark Zhang static int fill_stat_counter_qps(struct sk_buff *msg, 887c4ffee7cSMark Zhang struct rdma_counter *counter) 888c4ffee7cSMark Zhang { 889c4ffee7cSMark Zhang struct rdma_restrack_entry *res; 890c4ffee7cSMark Zhang struct rdma_restrack_root *rt; 891c4ffee7cSMark Zhang struct nlattr *table_attr; 892c4ffee7cSMark Zhang struct ib_qp *qp = NULL; 893c4ffee7cSMark Zhang unsigned long id = 0; 894c4ffee7cSMark Zhang int ret = 0; 895c4ffee7cSMark Zhang 896c4ffee7cSMark Zhang table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); 897ea5ef136SYuan Can if (!table_attr) 898ea5ef136SYuan Can return -EMSGSIZE; 899c4ffee7cSMark Zhang 900c4ffee7cSMark Zhang rt = &counter->device->res[RDMA_RESTRACK_QP]; 901c4ffee7cSMark Zhang xa_lock(&rt->xa); 902c4ffee7cSMark Zhang xa_for_each(&rt->xa, id, res) { 903c4ffee7cSMark Zhang qp = container_of(res, struct ib_qp, res); 904c4ffee7cSMark Zhang if (!qp->counter || (qp->counter->id != counter->id)) 905c4ffee7cSMark Zhang continue; 906c4ffee7cSMark Zhang 907c4ffee7cSMark Zhang ret = fill_stat_counter_qp_entry(msg, qp->qp_num); 908c4ffee7cSMark Zhang if (ret) 909c4ffee7cSMark Zhang goto err; 910c4ffee7cSMark Zhang } 911c4ffee7cSMark Zhang 912c4ffee7cSMark Zhang xa_unlock(&rt->xa); 913c4ffee7cSMark Zhang nla_nest_end(msg, table_attr); 914c4ffee7cSMark Zhang return 0; 915c4ffee7cSMark Zhang 916c4ffee7cSMark Zhang err: 917c4ffee7cSMark Zhang xa_unlock(&rt->xa); 918c4ffee7cSMark Zhang nla_nest_cancel(msg, table_attr); 919c4ffee7cSMark Zhang return ret; 920c4ffee7cSMark Zhang } 921c4ffee7cSMark Zhang 9224061ff7aSErez Alfasi int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, 9234061ff7aSErez Alfasi u64 
value) 924c4ffee7cSMark Zhang { 925c4ffee7cSMark Zhang struct nlattr *entry_attr; 926c4ffee7cSMark Zhang 927c4ffee7cSMark Zhang entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); 928c4ffee7cSMark Zhang if (!entry_attr) 929c4ffee7cSMark Zhang return -EMSGSIZE; 930c4ffee7cSMark Zhang 931c4ffee7cSMark Zhang if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, 932c4ffee7cSMark Zhang name)) 933c4ffee7cSMark Zhang goto err; 934c4ffee7cSMark Zhang if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, 935c4ffee7cSMark Zhang value, RDMA_NLDEV_ATTR_PAD)) 936c4ffee7cSMark Zhang goto err; 937c4ffee7cSMark Zhang 938c4ffee7cSMark Zhang nla_nest_end(msg, entry_attr); 939c4ffee7cSMark Zhang return 0; 940c4ffee7cSMark Zhang 941c4ffee7cSMark Zhang err: 942c4ffee7cSMark Zhang nla_nest_cancel(msg, entry_attr); 943c4ffee7cSMark Zhang return -EMSGSIZE; 944c4ffee7cSMark Zhang } 9454061ff7aSErez Alfasi EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); 9464061ff7aSErez Alfasi 9474061ff7aSErez Alfasi static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 9484061ff7aSErez Alfasi struct rdma_restrack_entry *res, uint32_t port) 9494061ff7aSErez Alfasi { 9504061ff7aSErez Alfasi struct ib_mr *mr = container_of(res, struct ib_mr, res); 9514061ff7aSErez Alfasi struct ib_device *dev = mr->pd->device; 9524061ff7aSErez Alfasi 9534061ff7aSErez Alfasi if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 9544061ff7aSErez Alfasi goto err; 9554061ff7aSErez Alfasi 956f4434529SMaor Gottlieb if (dev->ops.fill_stat_mr_entry) 957f4434529SMaor Gottlieb return dev->ops.fill_stat_mr_entry(msg, mr); 9584061ff7aSErez Alfasi return 0; 9594061ff7aSErez Alfasi 9604061ff7aSErez Alfasi err: 9614061ff7aSErez Alfasi return -EMSGSIZE; 9624061ff7aSErez Alfasi } 963c4ffee7cSMark Zhang 964c4ffee7cSMark Zhang static int fill_stat_counter_hwcounters(struct sk_buff *msg, 965c4ffee7cSMark Zhang struct rdma_counter *counter) 966c4ffee7cSMark Zhang { 
967c4ffee7cSMark Zhang struct rdma_hw_stats *st = counter->stats; 968c4ffee7cSMark Zhang struct nlattr *table_attr; 969c4ffee7cSMark Zhang int i; 970c4ffee7cSMark Zhang 971c4ffee7cSMark Zhang table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 972c4ffee7cSMark Zhang if (!table_attr) 973c4ffee7cSMark Zhang return -EMSGSIZE; 974c4ffee7cSMark Zhang 9750dc89684SAharon Landau mutex_lock(&st->lock); 9760dc89684SAharon Landau for (i = 0; i < st->num_counters; i++) { 9770dc89684SAharon Landau if (test_bit(i, st->is_disabled)) 9780dc89684SAharon Landau continue; 97913f30b0fSAharon Landau if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, 98013f30b0fSAharon Landau st->value[i])) 981c4ffee7cSMark Zhang goto err; 9820dc89684SAharon Landau } 9830dc89684SAharon Landau mutex_unlock(&st->lock); 984c4ffee7cSMark Zhang 985c4ffee7cSMark Zhang nla_nest_end(msg, table_attr); 986c4ffee7cSMark Zhang return 0; 987c4ffee7cSMark Zhang 988c4ffee7cSMark Zhang err: 9890dc89684SAharon Landau mutex_unlock(&st->lock); 990c4ffee7cSMark Zhang nla_nest_cancel(msg, table_attr); 991c4ffee7cSMark Zhang return -EMSGSIZE; 992c4ffee7cSMark Zhang } 993c4ffee7cSMark Zhang 994c4ffee7cSMark Zhang static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, 995c4ffee7cSMark Zhang struct rdma_restrack_entry *res, 996c4ffee7cSMark Zhang uint32_t port) 997c4ffee7cSMark Zhang { 998c4ffee7cSMark Zhang struct rdma_counter *counter = 999c4ffee7cSMark Zhang container_of(res, struct rdma_counter, res); 1000c4ffee7cSMark Zhang 1001c4ffee7cSMark Zhang if (port && port != counter->port) 1002a15542bbSMark Zhang return -EAGAIN; 1003c4ffee7cSMark Zhang 1004c4ffee7cSMark Zhang /* Dump it even query failed */ 1005c4ffee7cSMark Zhang rdma_counter_query_stats(counter); 1006c4ffee7cSMark Zhang 1007c4ffee7cSMark Zhang if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || 1008c4ffee7cSMark Zhang nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || 1009c4ffee7cSMark 
Zhang fill_stat_counter_mode(msg, counter) || 1010c4ffee7cSMark Zhang fill_stat_counter_qps(msg, counter) || 1011c4ffee7cSMark Zhang fill_stat_counter_hwcounters(msg, counter)) 1012c4ffee7cSMark Zhang return -EMSGSIZE; 1013c4ffee7cSMark Zhang 1014c4ffee7cSMark Zhang return 0; 1015c4ffee7cSMark Zhang } 1016c4ffee7cSMark Zhang 1017e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1018e5c9469eSLeon Romanovsky struct netlink_ext_ack *extack) 1019e5c9469eSLeon Romanovsky { 1020e5c9469eSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1021e5c9469eSLeon Romanovsky struct ib_device *device; 1022e5c9469eSLeon Romanovsky struct sk_buff *msg; 1023e5c9469eSLeon Romanovsky u32 index; 1024e5c9469eSLeon Romanovsky int err; 1025e5c9469eSLeon Romanovsky 10268cb08174SJohannes Berg err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1027e5c9469eSLeon Romanovsky nldev_policy, extack); 1028e5c9469eSLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1029e5c9469eSLeon Romanovsky return -EINVAL; 1030e5c9469eSLeon Romanovsky 1031e5c9469eSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1032e5c9469eSLeon Romanovsky 103337eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1034e5c9469eSLeon Romanovsky if (!device) 1035e5c9469eSLeon Romanovsky return -EINVAL; 1036e5c9469eSLeon Romanovsky 1037e5c9469eSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1038f8978bd9SLeon Romanovsky if (!msg) { 1039f8978bd9SLeon Romanovsky err = -ENOMEM; 1040f8978bd9SLeon Romanovsky goto err; 1041f8978bd9SLeon Romanovsky } 1042e5c9469eSLeon Romanovsky 1043e5c9469eSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1044e5c9469eSLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1045e5c9469eSLeon Romanovsky 0, 0); 1046*67e6272dSOr Har-Toov if (!nlh) { 1047*67e6272dSOr Har-Toov err = -EMSGSIZE; 1048*67e6272dSOr Har-Toov goto err_free; 
1049*67e6272dSOr Har-Toov } 1050e5c9469eSLeon Romanovsky 1051e5c9469eSLeon Romanovsky err = fill_dev_info(msg, device); 1052f8978bd9SLeon Romanovsky if (err) 1053f8978bd9SLeon Romanovsky goto err_free; 1054e5c9469eSLeon Romanovsky 1055e5c9469eSLeon Romanovsky nlmsg_end(msg, nlh); 1056e5c9469eSLeon Romanovsky 105701b67117SParav Pandit ib_device_put(device); 10581d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1059f8978bd9SLeon Romanovsky 1060f8978bd9SLeon Romanovsky err_free: 1061f8978bd9SLeon Romanovsky nlmsg_free(msg); 1062f8978bd9SLeon Romanovsky err: 106301b67117SParav Pandit ib_device_put(device); 1064f8978bd9SLeon Romanovsky return err; 1065e5c9469eSLeon Romanovsky } 1066e5c9469eSLeon Romanovsky 106705d940d3SLeon Romanovsky static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 106805d940d3SLeon Romanovsky struct netlink_ext_ack *extack) 106905d940d3SLeon Romanovsky { 107005d940d3SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 107105d940d3SLeon Romanovsky struct ib_device *device; 107205d940d3SLeon Romanovsky u32 index; 107305d940d3SLeon Romanovsky int err; 107405d940d3SLeon Romanovsky 10758cb08174SJohannes Berg err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 10768cb08174SJohannes Berg nldev_policy, extack); 107705d940d3SLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 107805d940d3SLeon Romanovsky return -EINVAL; 107905d940d3SLeon Romanovsky 108005d940d3SLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 108137eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 108205d940d3SLeon Romanovsky if (!device) 108305d940d3SLeon Romanovsky return -EINVAL; 108405d940d3SLeon Romanovsky 108505d940d3SLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { 108605d940d3SLeon Romanovsky char name[IB_DEVICE_NAME_MAX] = {}; 108705d940d3SLeon Romanovsky 1088872f6903SFrancis Laniel nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 108905d940d3SLeon 
Romanovsky IB_DEVICE_NAME_MAX); 10907aefa623SJason Gunthorpe if (strlen(name) == 0) { 10917aefa623SJason Gunthorpe err = -EINVAL; 10927aefa623SJason Gunthorpe goto done; 10937aefa623SJason Gunthorpe } 109405d940d3SLeon Romanovsky err = ib_device_rename(device, name); 10952e5b8a01SParav Pandit goto done; 109605d940d3SLeon Romanovsky } 109705d940d3SLeon Romanovsky 10982e5b8a01SParav Pandit if (tb[RDMA_NLDEV_NET_NS_FD]) { 10992e5b8a01SParav Pandit u32 ns_fd; 11002e5b8a01SParav Pandit 11012e5b8a01SParav Pandit ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); 11022e5b8a01SParav Pandit err = ib_device_set_netns_put(skb, device, ns_fd); 11032e5b8a01SParav Pandit goto put_done; 11042e5b8a01SParav Pandit } 11052e5b8a01SParav Pandit 1106f8fc8cd9SYamin Friedman if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { 1107f8fc8cd9SYamin Friedman u8 use_dim; 1108f8fc8cd9SYamin Friedman 1109f8fc8cd9SYamin Friedman use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); 1110f8fc8cd9SYamin Friedman err = ib_device_set_dim(device, use_dim); 1111f8fc8cd9SYamin Friedman goto done; 1112f8fc8cd9SYamin Friedman } 1113f8fc8cd9SYamin Friedman 11142e5b8a01SParav Pandit done: 111501b67117SParav Pandit ib_device_put(device); 11162e5b8a01SParav Pandit put_done: 111705d940d3SLeon Romanovsky return err; 111805d940d3SLeon Romanovsky } 111905d940d3SLeon Romanovsky 1120b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device, 1121b4c598a6SLeon Romanovsky struct sk_buff *skb, 1122b4c598a6SLeon Romanovsky struct netlink_callback *cb, 1123b4c598a6SLeon Romanovsky unsigned int idx) 1124b4c598a6SLeon Romanovsky { 1125b4c598a6SLeon Romanovsky int start = cb->args[0]; 1126b4c598a6SLeon Romanovsky struct nlmsghdr *nlh; 1127b4c598a6SLeon Romanovsky 1128b4c598a6SLeon Romanovsky if (idx < start) 1129b4c598a6SLeon Romanovsky return 0; 1130b4c598a6SLeon Romanovsky 1131b4c598a6SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1132b4c598a6SLeon Romanovsky 
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1133b4c598a6SLeon Romanovsky 0, NLM_F_MULTI); 1134b4c598a6SLeon Romanovsky 1135*67e6272dSOr Har-Toov if (!nlh || fill_dev_info(skb, device)) { 1136b4c598a6SLeon Romanovsky nlmsg_cancel(skb, nlh); 1137b4c598a6SLeon Romanovsky goto out; 1138b4c598a6SLeon Romanovsky } 1139b4c598a6SLeon Romanovsky 1140b4c598a6SLeon Romanovsky nlmsg_end(skb, nlh); 1141b4c598a6SLeon Romanovsky 1142b4c598a6SLeon Romanovsky idx++; 1143b4c598a6SLeon Romanovsky 1144b4c598a6SLeon Romanovsky out: cb->args[0] = idx; 1145b4c598a6SLeon Romanovsky return skb->len; 1146b4c598a6SLeon Romanovsky } 1147b4c598a6SLeon Romanovsky 1148b4c598a6SLeon Romanovsky static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 1149b4c598a6SLeon Romanovsky { 1150b4c598a6SLeon Romanovsky /* 1151b4c598a6SLeon Romanovsky * There is no need to take lock, because 115237eeab55SParav Pandit * we are relying on ib_core's locking. 1153b4c598a6SLeon Romanovsky */ 1154b4c598a6SLeon Romanovsky return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); 1155b4c598a6SLeon Romanovsky } 1156b4c598a6SLeon Romanovsky 1157c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1158c3f66f7bSLeon Romanovsky struct netlink_ext_ack *extack) 1159c3f66f7bSLeon Romanovsky { 1160c3f66f7bSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1161c3f66f7bSLeon Romanovsky struct ib_device *device; 1162c3f66f7bSLeon Romanovsky struct sk_buff *msg; 1163c3f66f7bSLeon Romanovsky u32 index; 1164c3f66f7bSLeon Romanovsky u32 port; 1165c3f66f7bSLeon Romanovsky int err; 1166c3f66f7bSLeon Romanovsky 11678cb08174SJohannes Berg err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1168c3f66f7bSLeon Romanovsky nldev_policy, extack); 1169287683d0SLeon Romanovsky if (err || 1170287683d0SLeon Romanovsky !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 1171287683d0SLeon Romanovsky !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 1172c3f66f7bSLeon Romanovsky return -EINVAL; 
1173c3f66f7bSLeon Romanovsky 1174c3f66f7bSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 117537eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1176c3f66f7bSLeon Romanovsky if (!device) 1177c3f66f7bSLeon Romanovsky return -EINVAL; 1178c3f66f7bSLeon Romanovsky 1179c3f66f7bSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1180f8978bd9SLeon Romanovsky if (!rdma_is_port_valid(device, port)) { 1181f8978bd9SLeon Romanovsky err = -EINVAL; 1182f8978bd9SLeon Romanovsky goto err; 1183f8978bd9SLeon Romanovsky } 1184c3f66f7bSLeon Romanovsky 1185c3f66f7bSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1186f8978bd9SLeon Romanovsky if (!msg) { 1187f8978bd9SLeon Romanovsky err = -ENOMEM; 1188f8978bd9SLeon Romanovsky goto err; 1189f8978bd9SLeon Romanovsky } 1190c3f66f7bSLeon Romanovsky 1191c3f66f7bSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1192c3f66f7bSLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1193c3f66f7bSLeon Romanovsky 0, 0); 1194*67e6272dSOr Har-Toov if (!nlh) { 1195*67e6272dSOr Har-Toov err = -EMSGSIZE; 1196*67e6272dSOr Har-Toov goto err_free; 1197*67e6272dSOr Har-Toov } 1198c3f66f7bSLeon Romanovsky 11995b2cc79dSLeon Romanovsky err = fill_port_info(msg, device, port, sock_net(skb->sk)); 1200f8978bd9SLeon Romanovsky if (err) 1201f8978bd9SLeon Romanovsky goto err_free; 1202c3f66f7bSLeon Romanovsky 1203c3f66f7bSLeon Romanovsky nlmsg_end(msg, nlh); 120401b67117SParav Pandit ib_device_put(device); 1205c3f66f7bSLeon Romanovsky 12061d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1207f8978bd9SLeon Romanovsky 1208f8978bd9SLeon Romanovsky err_free: 1209f8978bd9SLeon Romanovsky nlmsg_free(msg); 1210f8978bd9SLeon Romanovsky err: 121101b67117SParav Pandit ib_device_put(device); 1212f8978bd9SLeon Romanovsky return err; 1213c3f66f7bSLeon Romanovsky } 1214c3f66f7bSLeon Romanovsky 12157d02f605SLeon 
Romanovsky static int nldev_port_get_dumpit(struct sk_buff *skb, 12167d02f605SLeon Romanovsky struct netlink_callback *cb) 12177d02f605SLeon Romanovsky { 12187d02f605SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 12197d02f605SLeon Romanovsky struct ib_device *device; 12207d02f605SLeon Romanovsky int start = cb->args[0]; 12217d02f605SLeon Romanovsky struct nlmsghdr *nlh; 12227d02f605SLeon Romanovsky u32 idx = 0; 12237d02f605SLeon Romanovsky u32 ifindex; 12247d02f605SLeon Romanovsky int err; 1225ea1075edSJason Gunthorpe unsigned int p; 12267d02f605SLeon Romanovsky 12278cb08174SJohannes Berg err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 12287d02f605SLeon Romanovsky nldev_policy, NULL); 12297d02f605SLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 12307d02f605SLeon Romanovsky return -EINVAL; 12317d02f605SLeon Romanovsky 12327d02f605SLeon Romanovsky ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 123337eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), ifindex); 12347d02f605SLeon Romanovsky if (!device) 12357d02f605SLeon Romanovsky return -EINVAL; 12367d02f605SLeon Romanovsky 1237ea1075edSJason Gunthorpe rdma_for_each_port (device, p) { 12387d02f605SLeon Romanovsky /* 12397d02f605SLeon Romanovsky * The dumpit function returns all information from specific 12407d02f605SLeon Romanovsky * index. This specific index is taken from the netlink 12417d02f605SLeon Romanovsky * messages request sent by user and it is available 12427d02f605SLeon Romanovsky * in cb->args[0]. 12437d02f605SLeon Romanovsky * 12447d02f605SLeon Romanovsky * Usually, the user doesn't fill this field and it causes 12457d02f605SLeon Romanovsky * to return everything. 
12467d02f605SLeon Romanovsky * 12477d02f605SLeon Romanovsky */ 12487d02f605SLeon Romanovsky if (idx < start) { 12497d02f605SLeon Romanovsky idx++; 12507d02f605SLeon Romanovsky continue; 12517d02f605SLeon Romanovsky } 12527d02f605SLeon Romanovsky 12537d02f605SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, 12547d02f605SLeon Romanovsky cb->nlh->nlmsg_seq, 12557d02f605SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 12567d02f605SLeon Romanovsky RDMA_NLDEV_CMD_PORT_GET), 12577d02f605SLeon Romanovsky 0, NLM_F_MULTI); 12587d02f605SLeon Romanovsky 1259*67e6272dSOr Har-Toov if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { 12607d02f605SLeon Romanovsky nlmsg_cancel(skb, nlh); 12617d02f605SLeon Romanovsky goto out; 12627d02f605SLeon Romanovsky } 12637d02f605SLeon Romanovsky idx++; 12647d02f605SLeon Romanovsky nlmsg_end(skb, nlh); 12657d02f605SLeon Romanovsky } 12667d02f605SLeon Romanovsky 1267f8978bd9SLeon Romanovsky out: 126801b67117SParav Pandit ib_device_put(device); 1269f8978bd9SLeon Romanovsky cb->args[0] = idx; 12707d02f605SLeon Romanovsky return skb->len; 12717d02f605SLeon Romanovsky } 12727d02f605SLeon Romanovsky 1273bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1274bf3c5a93SLeon Romanovsky struct netlink_ext_ack *extack) 1275bf3c5a93SLeon Romanovsky { 1276bf3c5a93SLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1277bf3c5a93SLeon Romanovsky struct ib_device *device; 1278bf3c5a93SLeon Romanovsky struct sk_buff *msg; 1279bf3c5a93SLeon Romanovsky u32 index; 1280bf3c5a93SLeon Romanovsky int ret; 1281bf3c5a93SLeon Romanovsky 12828cb08174SJohannes Berg ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1283bf3c5a93SLeon Romanovsky nldev_policy, extack); 1284bf3c5a93SLeon Romanovsky if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1285bf3c5a93SLeon Romanovsky return -EINVAL; 1286bf3c5a93SLeon Romanovsky 1287bf3c5a93SLeon Romanovsky index = 
nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 128837eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1289bf3c5a93SLeon Romanovsky if (!device) 1290bf3c5a93SLeon Romanovsky return -EINVAL; 1291bf3c5a93SLeon Romanovsky 1292bf3c5a93SLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1293f34727a1SDan Carpenter if (!msg) { 1294f34727a1SDan Carpenter ret = -ENOMEM; 1295bf3c5a93SLeon Romanovsky goto err; 1296f34727a1SDan Carpenter } 1297bf3c5a93SLeon Romanovsky 1298bf3c5a93SLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1299bf3c5a93SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1300bf3c5a93SLeon Romanovsky 0, 0); 1301*67e6272dSOr Har-Toov if (!nlh) { 1302*67e6272dSOr Har-Toov ret = -EMSGSIZE; 1303*67e6272dSOr Har-Toov goto err_free; 1304*67e6272dSOr Har-Toov } 1305bf3c5a93SLeon Romanovsky 1306bf3c5a93SLeon Romanovsky ret = fill_res_info(msg, device); 1307bf3c5a93SLeon Romanovsky if (ret) 1308bf3c5a93SLeon Romanovsky goto err_free; 1309bf3c5a93SLeon Romanovsky 1310bf3c5a93SLeon Romanovsky nlmsg_end(msg, nlh); 131101b67117SParav Pandit ib_device_put(device); 13121d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1313bf3c5a93SLeon Romanovsky 1314bf3c5a93SLeon Romanovsky err_free: 1315bf3c5a93SLeon Romanovsky nlmsg_free(msg); 1316bf3c5a93SLeon Romanovsky err: 131701b67117SParav Pandit ib_device_put(device); 1318bf3c5a93SLeon Romanovsky return ret; 1319bf3c5a93SLeon Romanovsky } 1320bf3c5a93SLeon Romanovsky 1321bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device, 1322bf3c5a93SLeon Romanovsky struct sk_buff *skb, 1323bf3c5a93SLeon Romanovsky struct netlink_callback *cb, 1324bf3c5a93SLeon Romanovsky unsigned int idx) 1325bf3c5a93SLeon Romanovsky { 1326bf3c5a93SLeon Romanovsky int start = cb->args[0]; 1327bf3c5a93SLeon Romanovsky struct nlmsghdr *nlh; 1328bf3c5a93SLeon Romanovsky 1329bf3c5a93SLeon Romanovsky 
if (idx < start) 1330bf3c5a93SLeon Romanovsky return 0; 1331bf3c5a93SLeon Romanovsky 1332bf3c5a93SLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1333bf3c5a93SLeon Romanovsky RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1334bf3c5a93SLeon Romanovsky 0, NLM_F_MULTI); 1335bf3c5a93SLeon Romanovsky 1336*67e6272dSOr Har-Toov if (!nlh || fill_res_info(skb, device)) { 1337bf3c5a93SLeon Romanovsky nlmsg_cancel(skb, nlh); 1338bf3c5a93SLeon Romanovsky goto out; 1339bf3c5a93SLeon Romanovsky } 1340bf3c5a93SLeon Romanovsky nlmsg_end(skb, nlh); 1341bf3c5a93SLeon Romanovsky 1342bf3c5a93SLeon Romanovsky idx++; 1343bf3c5a93SLeon Romanovsky 1344bf3c5a93SLeon Romanovsky out: 1345bf3c5a93SLeon Romanovsky cb->args[0] = idx; 1346bf3c5a93SLeon Romanovsky return skb->len; 1347bf3c5a93SLeon Romanovsky } 1348bf3c5a93SLeon Romanovsky 1349bf3c5a93SLeon Romanovsky static int nldev_res_get_dumpit(struct sk_buff *skb, 1350bf3c5a93SLeon Romanovsky struct netlink_callback *cb) 1351bf3c5a93SLeon Romanovsky { 1352bf3c5a93SLeon Romanovsky return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); 1353bf3c5a93SLeon Romanovsky } 1354bf3c5a93SLeon Romanovsky 1355d12ff624SSteve Wise struct nldev_fill_res_entry { 1356d12ff624SSteve Wise enum rdma_nldev_attr nldev_attr; 1357c5dfe0eaSLeon Romanovsky u8 flags; 1358c5dfe0eaSLeon Romanovsky u32 entry; 1359c5dfe0eaSLeon Romanovsky u32 id; 1360c5dfe0eaSLeon Romanovsky }; 1361c5dfe0eaSLeon Romanovsky 1362c5dfe0eaSLeon Romanovsky enum nldev_res_flags { 1363c5dfe0eaSLeon Romanovsky NLDEV_PER_DEV = 1 << 0, 1364d12ff624SSteve Wise }; 1365d12ff624SSteve Wise 1366d12ff624SSteve Wise static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { 1367d12ff624SSteve Wise [RDMA_RESTRACK_QP] = { 1368d12ff624SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, 1369c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, 13701b8b7788SLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_LQPN, 1371d12ff624SSteve Wise }, 
137200313983SSteve Wise [RDMA_RESTRACK_CM_ID] = { 137300313983SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, 1374c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, 1375517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_CM_IDN, 137600313983SSteve Wise }, 1377a34fc089SSteve Wise [RDMA_RESTRACK_CQ] = { 1378a34fc089SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, 1379c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 1380c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, 1381517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_CQN, 1382a34fc089SSteve Wise }, 1383fccec5b8SSteve Wise [RDMA_RESTRACK_MR] = { 1384fccec5b8SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, 1385c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 1386c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, 1387517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_MRN, 1388fccec5b8SSteve Wise }, 138929cf1351SSteve Wise [RDMA_RESTRACK_PD] = { 139029cf1351SSteve Wise .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, 1391c5dfe0eaSLeon Romanovsky .flags = NLDEV_PER_DEV, 1392c5dfe0eaSLeon Romanovsky .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, 1393517b773eSLeon Romanovsky .id = RDMA_NLDEV_ATTR_RES_PDN, 139429cf1351SSteve Wise }, 1395c4ffee7cSMark Zhang [RDMA_RESTRACK_COUNTER] = { 1396c4ffee7cSMark Zhang .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, 1397c4ffee7cSMark Zhang .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, 1398c4ffee7cSMark Zhang .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, 1399c4ffee7cSMark Zhang }, 140012ce208fSNeta Ostrovsky [RDMA_RESTRACK_CTX] = { 140112ce208fSNeta Ostrovsky .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, 140212ce208fSNeta Ostrovsky .flags = NLDEV_PER_DEV, 140312ce208fSNeta Ostrovsky .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, 140412ce208fSNeta Ostrovsky .id = RDMA_NLDEV_ATTR_RES_CTXN, 140512ce208fSNeta Ostrovsky }, 1406391c6bd5SNeta Ostrovsky [RDMA_RESTRACK_SRQ] = { 1407391c6bd5SNeta Ostrovsky .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, 1408391c6bd5SNeta Ostrovsky .flags = 
NLDEV_PER_DEV, 1409391c6bd5SNeta Ostrovsky .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, 1410391c6bd5SNeta Ostrovsky .id = RDMA_NLDEV_ATTR_RES_SRQN, 1411391c6bd5SNeta Ostrovsky }, 1412391c6bd5SNeta Ostrovsky 1413d12ff624SSteve Wise }; 1414d12ff624SSteve Wise 1415c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1416c5dfe0eaSLeon Romanovsky struct netlink_ext_ack *extack, 1417fb910690SErez Alfasi enum rdma_restrack_type res_type, 1418fb910690SErez Alfasi res_fill_func_t fill_func) 1419c5dfe0eaSLeon Romanovsky { 1420c5dfe0eaSLeon Romanovsky const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1421c5dfe0eaSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1422c5dfe0eaSLeon Romanovsky struct rdma_restrack_entry *res; 1423c5dfe0eaSLeon Romanovsky struct ib_device *device; 1424c5dfe0eaSLeon Romanovsky u32 index, id, port = 0; 1425c5dfe0eaSLeon Romanovsky bool has_cap_net_admin; 1426c5dfe0eaSLeon Romanovsky struct sk_buff *msg; 1427c5dfe0eaSLeon Romanovsky int ret; 1428c5dfe0eaSLeon Romanovsky 14298cb08174SJohannes Berg ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1430c5dfe0eaSLeon Romanovsky nldev_policy, extack); 1431c5dfe0eaSLeon Romanovsky if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) 1432c5dfe0eaSLeon Romanovsky return -EINVAL; 1433c5dfe0eaSLeon Romanovsky 1434c5dfe0eaSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 143537eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1436c5dfe0eaSLeon Romanovsky if (!device) 1437c5dfe0eaSLeon Romanovsky return -EINVAL; 1438c5dfe0eaSLeon Romanovsky 1439c5dfe0eaSLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1440c5dfe0eaSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1441c5dfe0eaSLeon Romanovsky if (!rdma_is_port_valid(device, port)) { 1442c5dfe0eaSLeon Romanovsky ret = -EINVAL; 1443c5dfe0eaSLeon Romanovsky goto err; 1444c5dfe0eaSLeon Romanovsky } 
1445c5dfe0eaSLeon Romanovsky } 1446c5dfe0eaSLeon Romanovsky 1447c5dfe0eaSLeon Romanovsky if ((port && fe->flags & NLDEV_PER_DEV) || 1448c5dfe0eaSLeon Romanovsky (!port && ~fe->flags & NLDEV_PER_DEV)) { 1449c5dfe0eaSLeon Romanovsky ret = -EINVAL; 1450c5dfe0eaSLeon Romanovsky goto err; 1451c5dfe0eaSLeon Romanovsky } 1452c5dfe0eaSLeon Romanovsky 1453c5dfe0eaSLeon Romanovsky id = nla_get_u32(tb[fe->id]); 1454c5dfe0eaSLeon Romanovsky res = rdma_restrack_get_byid(device, res_type, id); 1455c5dfe0eaSLeon Romanovsky if (IS_ERR(res)) { 1456c5dfe0eaSLeon Romanovsky ret = PTR_ERR(res); 1457c5dfe0eaSLeon Romanovsky goto err; 1458c5dfe0eaSLeon Romanovsky } 1459c5dfe0eaSLeon Romanovsky 1460c5dfe0eaSLeon Romanovsky msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1461c5dfe0eaSLeon Romanovsky if (!msg) { 1462c5dfe0eaSLeon Romanovsky ret = -ENOMEM; 1463ab59ca3eSChristophe JAILLET goto err_get; 1464c5dfe0eaSLeon Romanovsky } 1465c5dfe0eaSLeon Romanovsky 1466c5dfe0eaSLeon Romanovsky nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 146765959522SMaor Gottlieb RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 146865959522SMaor Gottlieb RDMA_NL_GET_OP(nlh->nlmsg_type)), 1469c5dfe0eaSLeon Romanovsky 0, 0); 1470c5dfe0eaSLeon Romanovsky 1471*67e6272dSOr Har-Toov if (!nlh || fill_nldev_handle(msg, device)) { 1472c5dfe0eaSLeon Romanovsky ret = -EMSGSIZE; 1473c5dfe0eaSLeon Romanovsky goto err_free; 1474c5dfe0eaSLeon Romanovsky } 1475c5dfe0eaSLeon Romanovsky 1476c5dfe0eaSLeon Romanovsky has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); 1477fb910690SErez Alfasi 1478fb910690SErez Alfasi ret = fill_func(msg, has_cap_net_admin, res, port); 1479c5dfe0eaSLeon Romanovsky if (ret) 1480c5dfe0eaSLeon Romanovsky goto err_free; 1481c5dfe0eaSLeon Romanovsky 148250bbe3d3SMaor Gottlieb rdma_restrack_put(res); 1483c5dfe0eaSLeon Romanovsky nlmsg_end(msg, nlh); 1484c5dfe0eaSLeon Romanovsky ib_device_put(device); 14851d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 
1486c5dfe0eaSLeon Romanovsky 1487c5dfe0eaSLeon Romanovsky err_free: 1488c5dfe0eaSLeon Romanovsky nlmsg_free(msg); 1489c5dfe0eaSLeon Romanovsky err_get: 1490c5dfe0eaSLeon Romanovsky rdma_restrack_put(res); 1491c5dfe0eaSLeon Romanovsky err: 1492c5dfe0eaSLeon Romanovsky ib_device_put(device); 1493c5dfe0eaSLeon Romanovsky return ret; 1494c5dfe0eaSLeon Romanovsky } 1495c5dfe0eaSLeon Romanovsky 1496d12ff624SSteve Wise static int res_get_common_dumpit(struct sk_buff *skb, 1497d12ff624SSteve Wise struct netlink_callback *cb, 1498fb910690SErez Alfasi enum rdma_restrack_type res_type, 1499fb910690SErez Alfasi res_fill_func_t fill_func) 1500b5fa635aSLeon Romanovsky { 1501d12ff624SSteve Wise const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1502b5fa635aSLeon Romanovsky struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1503b5fa635aSLeon Romanovsky struct rdma_restrack_entry *res; 15047c77c6a9SLeon Romanovsky struct rdma_restrack_root *rt; 1505b5fa635aSLeon Romanovsky int err, ret = 0, idx = 0; 1506b5fa635aSLeon Romanovsky struct nlattr *table_attr; 1507c5dfe0eaSLeon Romanovsky struct nlattr *entry_attr; 1508b5fa635aSLeon Romanovsky struct ib_device *device; 1509b5fa635aSLeon Romanovsky int start = cb->args[0]; 1510659067b0SLeon Romanovsky bool has_cap_net_admin; 1511b5fa635aSLeon Romanovsky struct nlmsghdr *nlh; 1512fd47c2f9SLeon Romanovsky unsigned long id; 1513b5fa635aSLeon Romanovsky u32 index, port = 0; 1514d12ff624SSteve Wise bool filled = false; 1515b5fa635aSLeon Romanovsky 15168cb08174SJohannes Berg err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1517b5fa635aSLeon Romanovsky nldev_policy, NULL); 1518b5fa635aSLeon Romanovsky /* 1519d12ff624SSteve Wise * Right now, we are expecting the device index to get res information, 1520b5fa635aSLeon Romanovsky * but it is possible to extend this code to return all devices in 1521b5fa635aSLeon Romanovsky * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. 
1522b5fa635aSLeon Romanovsky * if it doesn't exist, we will iterate over all devices. 1523b5fa635aSLeon Romanovsky * 1524b5fa635aSLeon Romanovsky * But it is not needed for now. 1525b5fa635aSLeon Romanovsky */ 1526b5fa635aSLeon Romanovsky if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1527b5fa635aSLeon Romanovsky return -EINVAL; 1528b5fa635aSLeon Romanovsky 1529b5fa635aSLeon Romanovsky index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 153037eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 1531b5fa635aSLeon Romanovsky if (!device) 1532b5fa635aSLeon Romanovsky return -EINVAL; 1533b5fa635aSLeon Romanovsky 1534b5fa635aSLeon Romanovsky /* 1535b5fa635aSLeon Romanovsky * If no PORT_INDEX is supplied, we will return all QPs from that device 1536b5fa635aSLeon Romanovsky */ 1537b5fa635aSLeon Romanovsky if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1538b5fa635aSLeon Romanovsky port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1539b5fa635aSLeon Romanovsky if (!rdma_is_port_valid(device, port)) { 1540b5fa635aSLeon Romanovsky ret = -EINVAL; 1541b5fa635aSLeon Romanovsky goto err_index; 1542b5fa635aSLeon Romanovsky } 1543b5fa635aSLeon Romanovsky } 1544b5fa635aSLeon Romanovsky 1545b5fa635aSLeon Romanovsky nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 154665959522SMaor Gottlieb RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 154765959522SMaor Gottlieb RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 1548b5fa635aSLeon Romanovsky 0, NLM_F_MULTI); 1549b5fa635aSLeon Romanovsky 1550*67e6272dSOr Har-Toov if (!nlh || fill_nldev_handle(skb, device)) { 1551b5fa635aSLeon Romanovsky ret = -EMSGSIZE; 1552b5fa635aSLeon Romanovsky goto err; 1553b5fa635aSLeon Romanovsky } 1554b5fa635aSLeon Romanovsky 1555ae0be8deSMichal Kubecek table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); 1556b5fa635aSLeon Romanovsky if (!table_attr) { 1557b5fa635aSLeon Romanovsky ret = -EMSGSIZE; 1558b5fa635aSLeon Romanovsky goto err; 1559b5fa635aSLeon Romanovsky } 1560b5fa635aSLeon Romanovsky 
1561659067b0SLeon Romanovsky has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); 1562659067b0SLeon Romanovsky 15637c77c6a9SLeon Romanovsky rt = &device->res[res_type]; 15647c77c6a9SLeon Romanovsky xa_lock(&rt->xa); 1565fd47c2f9SLeon Romanovsky /* 1566fd47c2f9SLeon Romanovsky * FIXME: if the skip ahead is something common this loop should 1567fd47c2f9SLeon Romanovsky * use xas_for_each & xas_pause to optimize, we can have a lot of 1568fd47c2f9SLeon Romanovsky * objects. 1569fd47c2f9SLeon Romanovsky */ 15707c77c6a9SLeon Romanovsky xa_for_each(&rt->xa, id, res) { 1571f2a0e45fSLeon Romanovsky if (idx < start || !rdma_restrack_get(res)) 1572b5fa635aSLeon Romanovsky goto next; 1573b5fa635aSLeon Romanovsky 15747c77c6a9SLeon Romanovsky xa_unlock(&rt->xa); 15757c77c6a9SLeon Romanovsky 1576d12ff624SSteve Wise filled = true; 1577b5fa635aSLeon Romanovsky 1578ae0be8deSMichal Kubecek entry_attr = nla_nest_start_noflag(skb, fe->entry); 1579c5dfe0eaSLeon Romanovsky if (!entry_attr) { 1580c5dfe0eaSLeon Romanovsky ret = -EMSGSIZE; 1581c5dfe0eaSLeon Romanovsky rdma_restrack_put(res); 15827c77c6a9SLeon Romanovsky goto msg_full; 1583c5dfe0eaSLeon Romanovsky } 1584c5dfe0eaSLeon Romanovsky 1585fb910690SErez Alfasi ret = fill_func(skb, has_cap_net_admin, res, port); 1586fb910690SErez Alfasi 1587b5fa635aSLeon Romanovsky rdma_restrack_put(res); 1588b5fa635aSLeon Romanovsky 15897c77c6a9SLeon Romanovsky if (ret) { 1590c5dfe0eaSLeon Romanovsky nla_nest_cancel(skb, entry_attr); 1591b5fa635aSLeon Romanovsky if (ret == -EMSGSIZE) 15927c77c6a9SLeon Romanovsky goto msg_full; 1593c5dfe0eaSLeon Romanovsky if (ret == -EAGAIN) 15947c77c6a9SLeon Romanovsky goto again; 1595b5fa635aSLeon Romanovsky goto res_err; 15967c77c6a9SLeon Romanovsky } 1597c5dfe0eaSLeon Romanovsky nla_nest_end(skb, entry_attr); 15987c77c6a9SLeon Romanovsky again: xa_lock(&rt->xa); 1599b5fa635aSLeon Romanovsky next: idx++; 1600b5fa635aSLeon Romanovsky } 16017c77c6a9SLeon Romanovsky xa_unlock(&rt->xa); 1602b5fa635aSLeon 
Romanovsky 16037c77c6a9SLeon Romanovsky msg_full: 1604b5fa635aSLeon Romanovsky nla_nest_end(skb, table_attr); 1605b5fa635aSLeon Romanovsky nlmsg_end(skb, nlh); 1606b5fa635aSLeon Romanovsky cb->args[0] = idx; 1607b5fa635aSLeon Romanovsky 1608b5fa635aSLeon Romanovsky /* 1609d12ff624SSteve Wise * No more entries to fill, cancel the message and 1610b5fa635aSLeon Romanovsky * return 0 to mark end of dumpit. 1611b5fa635aSLeon Romanovsky */ 1612d12ff624SSteve Wise if (!filled) 1613b5fa635aSLeon Romanovsky goto err; 1614b5fa635aSLeon Romanovsky 161501b67117SParav Pandit ib_device_put(device); 1616b5fa635aSLeon Romanovsky return skb->len; 1617b5fa635aSLeon Romanovsky 1618b5fa635aSLeon Romanovsky res_err: 1619b5fa635aSLeon Romanovsky nla_nest_cancel(skb, table_attr); 1620b5fa635aSLeon Romanovsky 1621b5fa635aSLeon Romanovsky err: 1622b5fa635aSLeon Romanovsky nlmsg_cancel(skb, nlh); 1623b5fa635aSLeon Romanovsky 1624b5fa635aSLeon Romanovsky err_index: 162501b67117SParav Pandit ib_device_put(device); 1626b5fa635aSLeon Romanovsky return ret; 1627b5fa635aSLeon Romanovsky } 1628b5fa635aSLeon Romanovsky 1629f732e713SLeon Romanovsky #define RES_GET_FUNCS(name, type) \ 1630f732e713SLeon Romanovsky static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ 1631f732e713SLeon Romanovsky struct netlink_callback *cb) \ 1632f732e713SLeon Romanovsky { \ 1633fb910690SErez Alfasi return res_get_common_dumpit(skb, cb, type, \ 1634fb910690SErez Alfasi fill_res_##name##_entry); \ 1635c5dfe0eaSLeon Romanovsky } \ 1636c5dfe0eaSLeon Romanovsky static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ 1637c5dfe0eaSLeon Romanovsky struct nlmsghdr *nlh, \ 1638c5dfe0eaSLeon Romanovsky struct netlink_ext_ack *extack) \ 1639c5dfe0eaSLeon Romanovsky { \ 1640fb910690SErez Alfasi return res_get_common_doit(skb, nlh, extack, type, \ 1641fb910690SErez Alfasi fill_res_##name##_entry); \ 1642d12ff624SSteve Wise } 1643d12ff624SSteve Wise 1644f732e713SLeon Romanovsky RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); 
164565959522SMaor Gottlieb RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); 1646f732e713SLeon Romanovsky RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); 1647f732e713SLeon Romanovsky RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); 164865959522SMaor Gottlieb RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); 1649f732e713SLeon Romanovsky RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); 1650f732e713SLeon Romanovsky RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); 165165959522SMaor Gottlieb RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); 1652c4ffee7cSMark Zhang RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); 165312ce208fSNeta Ostrovsky RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); 1654391c6bd5SNeta Ostrovsky RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); 165529cf1351SSteve Wise 16563856ec4bSSteve Wise static LIST_HEAD(link_ops); 16573856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem); 16583856ec4bSSteve Wise 16593856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type) 16603856ec4bSSteve Wise { 16613856ec4bSSteve Wise const struct rdma_link_ops *ops; 16623856ec4bSSteve Wise 16633856ec4bSSteve Wise list_for_each_entry(ops, &link_ops, list) { 16643856ec4bSSteve Wise if (!strcmp(ops->type, type)) 16653856ec4bSSteve Wise goto out; 16663856ec4bSSteve Wise } 16673856ec4bSSteve Wise ops = NULL; 16683856ec4bSSteve Wise out: 16693856ec4bSSteve Wise return ops; 16703856ec4bSSteve Wise } 16713856ec4bSSteve Wise 16723856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops) 16733856ec4bSSteve Wise { 16743856ec4bSSteve Wise down_write(&link_ops_rwsem); 1675afc1990eSDan Carpenter if (WARN_ON_ONCE(link_ops_get(ops->type))) 16763856ec4bSSteve Wise goto out; 16773856ec4bSSteve Wise list_add(&ops->list, &link_ops); 16783856ec4bSSteve Wise out: 16793856ec4bSSteve Wise up_write(&link_ops_rwsem); 16803856ec4bSSteve Wise } 16813856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register); 16823856ec4bSSteve Wise 16833856ec4bSSteve Wise void rdma_link_unregister(struct rdma_link_ops *ops) 16843856ec4bSSteve Wise { 16853856ec4bSSteve 
Wise down_write(&link_ops_rwsem); 16863856ec4bSSteve Wise list_del(&ops->list); 16873856ec4bSSteve Wise up_write(&link_ops_rwsem); 16883856ec4bSSteve Wise } 16893856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_unregister); 16903856ec4bSSteve Wise 16913856ec4bSSteve Wise static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 16923856ec4bSSteve Wise struct netlink_ext_ack *extack) 16933856ec4bSSteve Wise { 16943856ec4bSSteve Wise struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 16953856ec4bSSteve Wise char ibdev_name[IB_DEVICE_NAME_MAX]; 16963856ec4bSSteve Wise const struct rdma_link_ops *ops; 16973856ec4bSSteve Wise char ndev_name[IFNAMSIZ]; 16983856ec4bSSteve Wise struct net_device *ndev; 16993856ec4bSSteve Wise char type[IFNAMSIZ]; 17003856ec4bSSteve Wise int err; 17013856ec4bSSteve Wise 17028cb08174SJohannes Berg err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 17033856ec4bSSteve Wise nldev_policy, extack); 17043856ec4bSSteve Wise if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || 17053856ec4bSSteve Wise !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) 17063856ec4bSSteve Wise return -EINVAL; 17073856ec4bSSteve Wise 1708872f6903SFrancis Laniel nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 17093856ec4bSSteve Wise sizeof(ibdev_name)); 17107aefa623SJason Gunthorpe if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) 17113856ec4bSSteve Wise return -EINVAL; 17123856ec4bSSteve Wise 1713872f6903SFrancis Laniel nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); 1714872f6903SFrancis Laniel nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], 17153856ec4bSSteve Wise sizeof(ndev_name)); 17163856ec4bSSteve Wise 17177a54f78dSParav Pandit ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); 17183856ec4bSSteve Wise if (!ndev) 17193856ec4bSSteve Wise return -ENODEV; 17203856ec4bSSteve Wise 17213856ec4bSSteve Wise down_read(&link_ops_rwsem); 17223856ec4bSSteve Wise ops = link_ops_get(type); 17233856ec4bSSteve Wise #ifdef 
CONFIG_MODULES 17243856ec4bSSteve Wise if (!ops) { 17253856ec4bSSteve Wise up_read(&link_ops_rwsem); 17263856ec4bSSteve Wise request_module("rdma-link-%s", type); 17273856ec4bSSteve Wise down_read(&link_ops_rwsem); 17283856ec4bSSteve Wise ops = link_ops_get(type); 17293856ec4bSSteve Wise } 17303856ec4bSSteve Wise #endif 17313856ec4bSSteve Wise err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL; 17323856ec4bSSteve Wise up_read(&link_ops_rwsem); 17333856ec4bSSteve Wise dev_put(ndev); 17343856ec4bSSteve Wise 17353856ec4bSSteve Wise return err; 17363856ec4bSSteve Wise } 17373856ec4bSSteve Wise 17383856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 17393856ec4bSSteve Wise struct netlink_ext_ack *extack) 17403856ec4bSSteve Wise { 17413856ec4bSSteve Wise struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 17423856ec4bSSteve Wise struct ib_device *device; 17433856ec4bSSteve Wise u32 index; 17443856ec4bSSteve Wise int err; 17453856ec4bSSteve Wise 17468cb08174SJohannes Berg err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 17473856ec4bSSteve Wise nldev_policy, extack); 17483856ec4bSSteve Wise if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 17493856ec4bSSteve Wise return -EINVAL; 17503856ec4bSSteve Wise 17513856ec4bSSteve Wise index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 175237eeab55SParav Pandit device = ib_device_get_by_index(sock_net(skb->sk), index); 17533856ec4bSSteve Wise if (!device) 17543856ec4bSSteve Wise return -EINVAL; 17553856ec4bSSteve Wise 1756e945c653SJason Gunthorpe if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { 17573856ec4bSSteve Wise ib_device_put(device); 17583856ec4bSSteve Wise return -EINVAL; 17593856ec4bSSteve Wise } 17603856ec4bSSteve Wise 17613856ec4bSSteve Wise ib_unregister_device_and_put(device); 17623856ec4bSSteve Wise return 0; 17633856ec4bSSteve Wise } 17643856ec4bSSteve Wise 17650e2d00ebSJason Gunthorpe static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, 
17660e2d00ebSJason Gunthorpe struct netlink_ext_ack *extack) 17670e2d00ebSJason Gunthorpe { 17680e2d00ebSJason Gunthorpe struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 176934d65cd8SDoug Ledford char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; 17700e2d00ebSJason Gunthorpe struct ib_client_nl_info data = {}; 17710e2d00ebSJason Gunthorpe struct ib_device *ibdev = NULL; 17720e2d00ebSJason Gunthorpe struct sk_buff *msg; 17730e2d00ebSJason Gunthorpe u32 index; 17740e2d00ebSJason Gunthorpe int err; 17750e2d00ebSJason Gunthorpe 17760e2d00ebSJason Gunthorpe err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 17770e2d00ebSJason Gunthorpe extack); 17780e2d00ebSJason Gunthorpe if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) 17790e2d00ebSJason Gunthorpe return -EINVAL; 17800e2d00ebSJason Gunthorpe 1781872f6903SFrancis Laniel nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], 178234d65cd8SDoug Ledford sizeof(client_name)); 17830e2d00ebSJason Gunthorpe 17840e2d00ebSJason Gunthorpe if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { 17850e2d00ebSJason Gunthorpe index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 17860e2d00ebSJason Gunthorpe ibdev = ib_device_get_by_index(sock_net(skb->sk), index); 17870e2d00ebSJason Gunthorpe if (!ibdev) 17880e2d00ebSJason Gunthorpe return -EINVAL; 17890e2d00ebSJason Gunthorpe 17900e2d00ebSJason Gunthorpe if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 17910e2d00ebSJason Gunthorpe data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 17920e2d00ebSJason Gunthorpe if (!rdma_is_port_valid(ibdev, data.port)) { 17930e2d00ebSJason Gunthorpe err = -EINVAL; 17940e2d00ebSJason Gunthorpe goto out_put; 17950e2d00ebSJason Gunthorpe } 17960e2d00ebSJason Gunthorpe } else { 17970e2d00ebSJason Gunthorpe data.port = -1; 17980e2d00ebSJason Gunthorpe } 17990e2d00ebSJason Gunthorpe } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 18000e2d00ebSJason Gunthorpe return -EINVAL; 18010e2d00ebSJason Gunthorpe } 18020e2d00ebSJason Gunthorpe 18030e2d00ebSJason Gunthorpe msg = 
nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 18040e2d00ebSJason Gunthorpe if (!msg) { 18050e2d00ebSJason Gunthorpe err = -ENOMEM; 18060e2d00ebSJason Gunthorpe goto out_put; 18070e2d00ebSJason Gunthorpe } 18080e2d00ebSJason Gunthorpe nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 18090e2d00ebSJason Gunthorpe RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 18100e2d00ebSJason Gunthorpe RDMA_NLDEV_CMD_GET_CHARDEV), 18110e2d00ebSJason Gunthorpe 0, 0); 1812*67e6272dSOr Har-Toov if (!nlh) { 1813*67e6272dSOr Har-Toov err = -EMSGSIZE; 1814*67e6272dSOr Har-Toov goto out_nlmsg; 1815*67e6272dSOr Har-Toov } 18160e2d00ebSJason Gunthorpe 18170e2d00ebSJason Gunthorpe data.nl_msg = msg; 18180e2d00ebSJason Gunthorpe err = ib_get_client_nl_info(ibdev, client_name, &data); 18190e2d00ebSJason Gunthorpe if (err) 18200e2d00ebSJason Gunthorpe goto out_nlmsg; 18210e2d00ebSJason Gunthorpe 18220e2d00ebSJason Gunthorpe err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, 18230e2d00ebSJason Gunthorpe huge_encode_dev(data.cdev->devt), 18240e2d00ebSJason Gunthorpe RDMA_NLDEV_ATTR_PAD); 18250e2d00ebSJason Gunthorpe if (err) 18260e2d00ebSJason Gunthorpe goto out_data; 18270e2d00ebSJason Gunthorpe err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, 18280e2d00ebSJason Gunthorpe RDMA_NLDEV_ATTR_PAD); 18290e2d00ebSJason Gunthorpe if (err) 18300e2d00ebSJason Gunthorpe goto out_data; 18310e2d00ebSJason Gunthorpe if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, 18320e2d00ebSJason Gunthorpe dev_name(data.cdev))) { 18330e2d00ebSJason Gunthorpe err = -EMSGSIZE; 18340e2d00ebSJason Gunthorpe goto out_data; 18350e2d00ebSJason Gunthorpe } 18360e2d00ebSJason Gunthorpe 18370e2d00ebSJason Gunthorpe nlmsg_end(msg, nlh); 18380e2d00ebSJason Gunthorpe put_device(data.cdev); 18390e2d00ebSJason Gunthorpe if (ibdev) 18400e2d00ebSJason Gunthorpe ib_device_put(ibdev); 18411d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 18420e2d00ebSJason Gunthorpe 
/* Error unwind for nldev_get_chardev() — function head is above this chunk. */
out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}

/*
 * RDMA_NLDEV_CMD_SYS_GET handler: report global RDMA subsystem settings
 * (net-namespace sharing mode, copy-on-fork support) back to the caller
 * as a unicast netlink reply.
 */
static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct sk_buff *msg;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_SYS_GET),
			0, 0);
	/* nlmsg_put() returns NULL when the reply header does not fit. */
	if (!nlh) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}

	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
			 (u8)ib_devices_shared_netns);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	/*
	 * Copy-on-fork is supported.
	 * See commits:
	 * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
	 * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
	 * for more details. Don't backport this without them.
	 *
	 * Return value ignored on purpose, assume copy-on-fork is not
	 * supported in case of failure.
	 */
	nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);

	nlmsg_end(msg, nlh);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}

/*
 * RDMA_NLDEV_CMD_SYS_SET handler: switch the RDMA subsystem between
 * shared (1) and exclusive (0) network-namespace mode.
 */
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	u8 enable;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
		return -EINVAL;

	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
	/* Only 0 and 1 are supported */
	if (enable > 1)
		return -EINVAL;

	err = rdma_compatdev_set(enable);
	return err;
}

/*
 * Helper for RDMA_NLDEV_CMD_STAT_SET: configure the counter mode for a
 * device port.  Auto mode (re)binds QPs to counters per the given mask;
 * manual mode binds one QP (LQPN attribute) to an existing counter
 * (COUNTER_ID attribute) or to a newly allocated one.
 * On success the bound counter id and QPN are echoed into @msg.
 */
static int nldev_stat_set_mode_doit(struct sk_buff *msg,
				    struct netlink_ext_ack *extack,
				    struct nlattr *tb[],
				    struct ib_device *device, u32 port)
{
	u32 mode, mask = 0, qpn, cntn = 0;
	int ret;

	/* Currently only counter for QP is supported */
	if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
	if (mode == RDMA_COUNTER_MODE_AUTO) {
		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
			mask = nla_get_u32(
				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
		return rdma_counter_set_auto_mode(device, port, mask, extack);
	}

	/* Manual mode requires the QP to bind. */
	if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
		cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
		ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
		if (ret)
			return ret;
	} else {
		ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
		if (ret)
			return ret;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	return 0;

err_fill:
	/* Undo the bind performed above so failure leaves no stale binding. */
	rdma_counter_unbind_qpn(device, port, qpn, cntn);
	return ret;
}
1960822cf785SAharon Landau ret = -EMSGSIZE; 1961822cf785SAharon Landau goto err_fill; 1962822cf785SAharon Landau } 1963822cf785SAharon Landau 1964822cf785SAharon Landau return 0; 1965822cf785SAharon Landau 1966822cf785SAharon Landau err_fill: 1967822cf785SAharon Landau rdma_counter_unbind_qpn(device, port, qpn, cntn); 1968822cf785SAharon Landau return ret; 1969822cf785SAharon Landau } 1970822cf785SAharon Landau 19713c3c1f14SAharon Landau static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], 19723c3c1f14SAharon Landau struct ib_device *device, 19733c3c1f14SAharon Landau u32 port) 19743c3c1f14SAharon Landau { 19753c3c1f14SAharon Landau struct rdma_hw_stats *stats; 19763c3c1f14SAharon Landau struct nlattr *entry_attr; 19773c3c1f14SAharon Landau unsigned long *target; 197887e0eacbSDan Carpenter int rem, i, ret = 0; 197987e0eacbSDan Carpenter u32 index; 19803c3c1f14SAharon Landau 19813c3c1f14SAharon Landau stats = ib_get_hw_stats_port(device, port); 19823c3c1f14SAharon Landau if (!stats) 19833c3c1f14SAharon Landau return -EINVAL; 19843c3c1f14SAharon Landau 19853c3c1f14SAharon Landau target = kcalloc(BITS_TO_LONGS(stats->num_counters), 19863c3c1f14SAharon Landau sizeof(*stats->is_disabled), GFP_KERNEL); 19873c3c1f14SAharon Landau if (!target) 19883c3c1f14SAharon Landau return -ENOMEM; 19893c3c1f14SAharon Landau 19903c3c1f14SAharon Landau nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], 19913c3c1f14SAharon Landau rem) { 19923c3c1f14SAharon Landau index = nla_get_u32(entry_attr); 19933c3c1f14SAharon Landau if ((index >= stats->num_counters) || 19943c3c1f14SAharon Landau !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { 19953c3c1f14SAharon Landau ret = -EINVAL; 19963c3c1f14SAharon Landau goto out; 19973c3c1f14SAharon Landau } 19983c3c1f14SAharon Landau 19993c3c1f14SAharon Landau set_bit(index, target); 20003c3c1f14SAharon Landau } 20013c3c1f14SAharon Landau 20023c3c1f14SAharon Landau for (i = 0; i < stats->num_counters; i++) { 
20033c3c1f14SAharon Landau if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) 20043c3c1f14SAharon Landau continue; 20053c3c1f14SAharon Landau 20063c3c1f14SAharon Landau ret = rdma_counter_modify(device, port, i, test_bit(i, target)); 20073c3c1f14SAharon Landau if (ret) 20083c3c1f14SAharon Landau goto out; 20093c3c1f14SAharon Landau } 20103c3c1f14SAharon Landau 20113c3c1f14SAharon Landau out: 20123c3c1f14SAharon Landau kfree(target); 20133c3c1f14SAharon Landau return ret; 20143c3c1f14SAharon Landau } 20153c3c1f14SAharon Landau 2016b47ae6f8SMark Zhang static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 2017b47ae6f8SMark Zhang struct netlink_ext_ack *extack) 2018b47ae6f8SMark Zhang { 2019b47ae6f8SMark Zhang struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2020b47ae6f8SMark Zhang struct ib_device *device; 2021b47ae6f8SMark Zhang struct sk_buff *msg; 2022822cf785SAharon Landau u32 index, port; 2023b47ae6f8SMark Zhang int ret; 2024b47ae6f8SMark Zhang 2025822cf785SAharon Landau ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 2026822cf785SAharon Landau extack); 2027822cf785SAharon Landau if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 2028822cf785SAharon Landau !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 2029b47ae6f8SMark Zhang return -EINVAL; 2030b47ae6f8SMark Zhang 2031b47ae6f8SMark Zhang index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2032b47ae6f8SMark Zhang device = ib_device_get_by_index(sock_net(skb->sk), index); 2033b47ae6f8SMark Zhang if (!device) 2034b47ae6f8SMark Zhang return -EINVAL; 2035b47ae6f8SMark Zhang 2036b47ae6f8SMark Zhang port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2037b47ae6f8SMark Zhang if (!rdma_is_port_valid(device, port)) { 2038b47ae6f8SMark Zhang ret = -EINVAL; 2039822cf785SAharon Landau goto err_put_device; 2040822cf785SAharon Landau } 2041822cf785SAharon Landau 20423c3c1f14SAharon Landau if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && 20433c3c1f14SAharon Landau !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { 2044822cf785SAharon 
/*
 * RDMA_NLDEV_CMD_STAT_DEL handler: unbind a QP (LQPN attribute) from a
 * manually bound counter (COUNTER_ID attribute) on a device port.
 * Replies with a STAT_SET-typed message echoing device, port, counter id
 * and QPN so the caller can confirm what was removed.
 */
static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port, qpn, cntn;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	/* Only QP-bound counters can be unbound. */
	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);
	if (!nlh) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	/* Build the full reply before touching counter state. */
	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
	if (ret)
		goto err_fill;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
/*
 * Report the default (per-port) HW counters for RDMA_NLDEV_CMD_STAT_GET
 * when no STAT_RES attribute was given.  Sums the cached per-port value
 * with the counter-bound delta from rdma_counter_get_hwstat_value() and
 * emits one name/value entry per enabled counter inside a nested
 * HWCOUNTERS attribute.
 */
static int stat_get_doit_default_counter(struct sk_buff *skb,
					 struct nlmsghdr *nlh,
					 struct netlink_ext_ack *extack,
					 struct nlattr *tb[])
{
	struct rdma_hw_stats *stats;
	struct nlattr *table_attr;
	struct ib_device *device;
	int ret, num_cnts, i;
	struct sk_buff *msg;
	u32 index, port;
	u64 v;

	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* Driver must implement HW stats for this query to make sense. */
	if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) {
		ret = -EINVAL;
		goto err;
	}

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	stats = ib_get_hw_stats_port(device, port);
	if (!stats) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	/* stats->lock guards both the refresh below and stats->value[]. */
	mutex_lock(&stats->lock);

	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
	if (num_cnts < 0) {
		ret = -EINVAL;
		goto err_stats;
	}
22136e7be47aSMark Zhang 22146e7be47aSMark Zhang table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 22156e7be47aSMark Zhang if (!table_attr) { 22166e7be47aSMark Zhang ret = -EMSGSIZE; 22176e7be47aSMark Zhang goto err_stats; 22186e7be47aSMark Zhang } 22196e7be47aSMark Zhang for (i = 0; i < num_cnts; i++) { 22200dc89684SAharon Landau if (test_bit(i, stats->is_disabled)) 22210dc89684SAharon Landau continue; 22220dc89684SAharon Landau 22236e7be47aSMark Zhang v = stats->value[i] + 22246e7be47aSMark Zhang rdma_counter_get_hwstat_value(device, port, i); 222513f30b0fSAharon Landau if (rdma_nl_stat_hwcounter_entry(msg, 222613f30b0fSAharon Landau stats->descs[i].name, v)) { 22276e7be47aSMark Zhang ret = -EMSGSIZE; 22286e7be47aSMark Zhang goto err_table; 22296e7be47aSMark Zhang } 22306e7be47aSMark Zhang } 22316e7be47aSMark Zhang nla_nest_end(msg, table_attr); 22326e7be47aSMark Zhang 22336e7be47aSMark Zhang mutex_unlock(&stats->lock); 22346e7be47aSMark Zhang nlmsg_end(msg, nlh); 22356e7be47aSMark Zhang ib_device_put(device); 22361d2fedd8SParav Pandit return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 22376e7be47aSMark Zhang 22386e7be47aSMark Zhang err_table: 22396e7be47aSMark Zhang nla_nest_cancel(msg, table_attr); 22406e7be47aSMark Zhang err_stats: 22416e7be47aSMark Zhang mutex_unlock(&stats->lock); 22426e7be47aSMark Zhang err_msg: 22436e7be47aSMark Zhang nlmsg_free(msg); 22446e7be47aSMark Zhang err: 22456e7be47aSMark Zhang ib_device_put(device); 22466e7be47aSMark Zhang return ret; 22476e7be47aSMark Zhang } 22486e7be47aSMark Zhang 224983c2c1fcSMark Zhang static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, 225083c2c1fcSMark Zhang struct netlink_ext_ack *extack, struct nlattr *tb[]) 225183c2c1fcSMark Zhang 225283c2c1fcSMark Zhang { 225383c2c1fcSMark Zhang static enum rdma_nl_counter_mode mode; 225483c2c1fcSMark Zhang static enum rdma_nl_counter_mask mask; 225583c2c1fcSMark Zhang struct ib_device *device; 225683c2c1fcSMark 
Zhang struct sk_buff *msg; 225783c2c1fcSMark Zhang u32 index, port; 225883c2c1fcSMark Zhang int ret; 225983c2c1fcSMark Zhang 226083c2c1fcSMark Zhang if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) 226183c2c1fcSMark Zhang return nldev_res_get_counter_doit(skb, nlh, extack); 226283c2c1fcSMark Zhang 226383c2c1fcSMark Zhang if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || 226483c2c1fcSMark Zhang !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 226583c2c1fcSMark Zhang return -EINVAL; 226683c2c1fcSMark Zhang 226783c2c1fcSMark Zhang index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 226883c2c1fcSMark Zhang device = ib_device_get_by_index(sock_net(skb->sk), index); 226983c2c1fcSMark Zhang if (!device) 227083c2c1fcSMark Zhang return -EINVAL; 227183c2c1fcSMark Zhang 227283c2c1fcSMark Zhang port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 227383c2c1fcSMark Zhang if (!rdma_is_port_valid(device, port)) { 227483c2c1fcSMark Zhang ret = -EINVAL; 227583c2c1fcSMark Zhang goto err; 227683c2c1fcSMark Zhang } 227783c2c1fcSMark Zhang 227883c2c1fcSMark Zhang msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 227983c2c1fcSMark Zhang if (!msg) { 228083c2c1fcSMark Zhang ret = -ENOMEM; 228183c2c1fcSMark Zhang goto err; 228283c2c1fcSMark Zhang } 228383c2c1fcSMark Zhang 228483c2c1fcSMark Zhang nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 228583c2c1fcSMark Zhang RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 228683c2c1fcSMark Zhang RDMA_NLDEV_CMD_STAT_GET), 228783c2c1fcSMark Zhang 0, 0); 2288*67e6272dSOr Har-Toov if (!nlh) { 2289*67e6272dSOr Har-Toov ret = -EMSGSIZE; 2290*67e6272dSOr Har-Toov goto err_msg; 2291*67e6272dSOr Har-Toov } 229283c2c1fcSMark Zhang 229383c2c1fcSMark Zhang ret = rdma_counter_get_mode(device, port, &mode, &mask); 229483c2c1fcSMark Zhang if (ret) 229583c2c1fcSMark Zhang goto err_msg; 229683c2c1fcSMark Zhang 229783c2c1fcSMark Zhang if (fill_nldev_handle(msg, device) || 229883c2c1fcSMark Zhang nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 2299932727c5SDan 
/*
 * RDMA_NLDEV_CMD_STAT_GET doit: no STAT_RES attribute means "default
 * port counters"; otherwise dispatch on the resource type (QP or MR).
 */
static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret)
		return -EINVAL;

	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
		return stat_get_doit_default_counter(skb, nlh, extack, tb);

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = stat_get_doit_qp(skb, nlh, extack, tb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
					  fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * RDMA_NLDEV_CMD_STAT_GET dumpit: same dispatch as the doit variant but
 * for multi-part dumps; STAT_RES is mandatory here.
 */
static int nldev_stat_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	int ret;

	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
		return -EINVAL;

	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
	case RDMA_NLDEV_ATTR_RES_QP:
		ret = nldev_res_get_counter_dumpit(skb, cb);
		break;
	case RDMA_NLDEV_ATTR_RES_MR:
		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
					    fill_stat_mr_entry);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * RDMA_NLDEV_CMD_STAT_GET_STATUS handler: list every HW counter on a
 * port with its name and index; optional counters additionally carry a
 * DYNAMIC flag showing whether they are currently enabled.
 */
static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
					      struct nlmsghdr *nlh,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
	struct rdma_hw_stats *stats;
	struct ib_device *device;
	struct sk_buff *msg;
	u32 devid, port;
	int ret, i;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), devid);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	stats = ib_get_hw_stats_port(device, port);
	if (!stats) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(
		msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
		RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
		0, 0);

	ret = -EMSGSIZE;
	if (!nlh || fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		goto err_msg;

	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table)
		goto err_msg;

	/* Hold stats->lock so is_disabled stays stable while we walk it. */
	mutex_lock(&stats->lock);
	for (i = 0; i < stats->num_counters; i++) {
		entry = nla_nest_start(msg,
				       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
		if (!entry)
			goto err_msg_table;

		if (nla_put_string(msg,
				   RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
				   stats->descs[i].name) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
			goto err_msg_entry;

		if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
		    (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
				!test_bit(i, stats->is_disabled))))
			goto err_msg_entry;

		nla_nest_end(msg, entry);
	}
	mutex_unlock(&stats->lock);

	nla_nest_end(msg, table);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg_entry:
	nla_nest_cancel(msg, entry);
err_msg_table:
	mutex_unlock(&stats->lock);
	nla_nest_cancel(msg, table);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
/*
 * Dispatch table mapping RDMA_NLDEV_CMD_* to handlers.  Entries with
 * RDMA_NL_ADMIN_PERM require CAP_NET_ADMIN in the caller's namespace.
 */
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
		.doit = nldev_res_get_ctx_doit,
		.dump = nldev_res_get_ctx_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
		.doit = nldev_res_get_srq_doit,
		.dump = nldev_res_get_srq_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	/*
	 * NOTE(review): SYS_SET mutates global namespace-sharing policy but
	 * carries no RDMA_NL_ADMIN_PERM in this snapshot, unlike STAT_SET
	 * below — confirm against upstream whether this is intentional.
	 */
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
		.doit = nldev_res_get_qp_raw_doit,
		.dump = nldev_res_get_qp_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
		.doit = nldev_res_get_cq_raw_doit,
		.dump = nldev_res_get_cq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
		.doit = nldev_res_get_mr_raw_doit,
		.dump = nldev_res_get_mr_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
		.doit = nldev_stat_get_counter_status_doit,
	},
};

/* Register the nldev client with the RDMA netlink core at boot. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

/* Unregister on module exit; pending requests drain in the netlink core. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);