xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision b389327d)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
446c80b41aSLeon Romanovsky 
45696de2e9SDoug Ledford /*
46696de2e9SDoug Ledford  * Sort array elements by the netlink attribute name
47696de2e9SDoug Ledford  */
48b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
49696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
50696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
51696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
5234d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
53696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
5434d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
55b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
56b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
5734d65cd8SDoug Ledford 					.len = IB_DEVICE_NAME_MAX },
58696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
59696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
6034d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
61696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
62696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
63696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
64696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
6534d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
66696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
67696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
68696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
69696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
708621a7e3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
7134d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
7280a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
73696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
7434d65cd8SDoug Ledford 					.len = IFNAMSIZ },
7534840feaSLeon Romanovsky 	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
765b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
775b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
785b2cc79dSLeon Romanovsky 					.len = IFNAMSIZ },
79696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
80696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
81696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
82696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
83696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
84517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
85696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
86696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
87696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
88696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
89696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
90c3d02788SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
91696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
92696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
93696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
94696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
9534d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
96696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
97696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
98696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
99696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
100696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
101696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
102696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
103696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
104696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
105696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
106696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
107696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
108696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
109696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
110696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
111696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
112696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
113696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
114696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
115696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
116696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
117696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
118696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
119696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
120696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
121696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
122696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
12334d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
124696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
125696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
126696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
127696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
128696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
129b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
130b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
131b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
132c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
133c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
134c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]       = { .type = NLA_U32 },
135c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]       = { .type = NLA_NESTED },
136c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]  = { .type = NLA_NESTED },
137c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
138c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
139696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
1408f71bb00SJason Gunthorpe 	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
141696de2e9SDoug Ledford 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
142696de2e9SDoug Ledford 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
143b4c598a6SLeon Romanovsky };
144b4c598a6SLeon Romanovsky 
14573937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
14673937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
14773937e8aSSteve Wise {
14873937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
14973937e8aSSteve Wise 		return -EMSGSIZE;
15073937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
15173937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
15273937e8aSSteve Wise 		return -EMSGSIZE;
15373937e8aSSteve Wise 
15473937e8aSSteve Wise 	return 0;
15573937e8aSSteve Wise }
15673937e8aSSteve Wise 
15773937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
15873937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
15973937e8aSSteve Wise 				   u32 value)
16073937e8aSSteve Wise {
16173937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
16273937e8aSSteve Wise 		return -EMSGSIZE;
16373937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
16473937e8aSSteve Wise 		return -EMSGSIZE;
16573937e8aSSteve Wise 
16673937e8aSSteve Wise 	return 0;
16773937e8aSSteve Wise }
16873937e8aSSteve Wise 
16973937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
17073937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
17173937e8aSSteve Wise 				   u64 value)
17273937e8aSSteve Wise {
17373937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
17473937e8aSSteve Wise 		return -EMSGSIZE;
17573937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
17673937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
17773937e8aSSteve Wise 		return -EMSGSIZE;
17873937e8aSSteve Wise 
17973937e8aSSteve Wise 	return 0;
18073937e8aSSteve Wise }
18173937e8aSSteve Wise 
18273937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
18373937e8aSSteve Wise {
18473937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
18573937e8aSSteve Wise 				       value);
18673937e8aSSteve Wise }
18773937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32);
18873937e8aSSteve Wise 
18973937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
19073937e8aSSteve Wise 			       u32 value)
19173937e8aSSteve Wise {
19273937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
19373937e8aSSteve Wise 				       value);
19473937e8aSSteve Wise }
19573937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
19673937e8aSSteve Wise 
19773937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
19873937e8aSSteve Wise {
19973937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
20073937e8aSSteve Wise 				       value);
20173937e8aSSteve Wise }
20273937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64);
20373937e8aSSteve Wise 
20473937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
20573937e8aSSteve Wise {
20673937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
20773937e8aSSteve Wise 				       value);
20873937e8aSSteve Wise }
20973937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
21073937e8aSSteve Wise 
211c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
212b4c598a6SLeon Romanovsky {
213b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
214b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
215896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
216896de009SJason Gunthorpe 			   dev_name(&device->dev)))
217b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
218c2409810SLeon Romanovsky 
219c2409810SLeon Romanovsky 	return 0;
220c2409810SLeon Romanovsky }
221c2409810SLeon Romanovsky 
/*
 * Fill the device-wide attributes for a nldev GET response: handle,
 * highest port number, capability flags, FW version (if any), node and
 * system image GUIDs, node type, and the link protocol string.
 *
 * Returns 0 on success, -EMSGSIZE when @msg runs out of room, or the
 * error from the last nla_put_string() in the protocol chain.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u8 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* PORT_INDEX here reports rdma_end_port(), the highest port number. */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	/* device_cap_flags is sent as a fixed u64 attribute; catch ABI drift. */
	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	/* GUIDs are stored big-endian; convert to CPU order for netlink. */
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;

	/*
	 * Link type is determined on first port and mlx4 device
	 * which can potentially have two different link type for the same
	 * IB device is considered as better to be avoided in the future,
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}
275b4c598a6SLeon Romanovsky 
/*
 * Fill the per-port attributes for a nldev GET response: device handle,
 * port index, then (IB ports only) capability flags, subnet prefix,
 * LID/SM LID/LMC, followed by port state, physical state, and — when the
 * bound net_device lives in the requester's network namespace @net — the
 * netdev index and name.
 *
 * Returns 0 on success, -EMSGSIZE when @msg is full, or the error from
 * ib_query_port().
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap flag words are packed into one u64 attr. */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/* Holds a reference on success; released at "out" below. */
	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}
3337d02f605SLeon Romanovsky 
334bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
335bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
336bf3c5a93SLeon Romanovsky {
337bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
338bf3c5a93SLeon Romanovsky 
339ae0be8deSMichal Kubecek 	entry_attr = nla_nest_start_noflag(msg,
340ae0be8deSMichal Kubecek 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
341bf3c5a93SLeon Romanovsky 	if (!entry_attr)
342bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
343bf3c5a93SLeon Romanovsky 
344bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
345bf3c5a93SLeon Romanovsky 		goto err;
34625a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
34725a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
348bf3c5a93SLeon Romanovsky 		goto err;
349bf3c5a93SLeon Romanovsky 
350bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
351bf3c5a93SLeon Romanovsky 	return 0;
352bf3c5a93SLeon Romanovsky 
353bf3c5a93SLeon Romanovsky err:
354bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
355bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
356bf3c5a93SLeon Romanovsky }
357bf3c5a93SLeon Romanovsky 
/*
 * Build the resource-usage summary for @device: a nested table with one
 * (name, current count) entry per tracked resource type, counted within
 * the caller's active PID namespace.  The whole nest is cancelled if any
 * entry fails to fit.  Returns 0 or a negative errno.
 */
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	/* Userspace-visible names; types without a name are not reported. */
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i,
					   task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
396bf3c5a93SLeon Romanovsky 
39700313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
39800313983SSteve Wise 			     struct rdma_restrack_entry *res)
39900313983SSteve Wise {
40000313983SSteve Wise 	/*
40100313983SSteve Wise 	 * For user resources, user is should read /proc/PID/comm to get the
40200313983SSteve Wise 	 * name of the task file.
40300313983SSteve Wise 	 */
40400313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
40500313983SSteve Wise 		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
40600313983SSteve Wise 		    res->kern_name))
40700313983SSteve Wise 			return -EMSGSIZE;
40800313983SSteve Wise 	} else {
40900313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
41000313983SSteve Wise 		    task_pid_vnr(res->task)))
41100313983SSteve Wise 			return -EMSGSIZE;
41200313983SSteve Wise 	}
41300313983SSteve Wise 	return 0;
41400313983SSteve Wise }
41500313983SSteve Wise 
41602da3750SLeon Romanovsky static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
41702da3750SLeon Romanovsky 			   struct rdma_restrack_entry *res)
41802da3750SLeon Romanovsky {
41902da3750SLeon Romanovsky 	if (!dev->ops.fill_res_entry)
42002da3750SLeon Romanovsky 		return false;
42102da3750SLeon Romanovsky 	return dev->ops.fill_res_entry(msg, res);
42202da3750SLeon Romanovsky }
42302da3750SLeon Romanovsky 
/*
 * Dump one tracked QP into @msg.  Returns -EAGAIN when the QP is not on
 * the requested @port (so the iterator skips it), -EMSGSIZE when the
 * message is full, or the ib_query_qp() error.
 *
 * NOTE(review): @has_cap_net_admin is currently unused here — presumably
 * reserved for gating privileged attributes; confirm against callers.
 */
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	/* Remote QPN and RQ PSN only make sense for connected QP types. */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	/* Path migration state applies only to these QP types. */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	/* PD object number is exposed for user QPs only. */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}
48400313983SSteve Wise 
/*
 * Dump one tracked RDMA CM ID into @msg: port (if bound), QPN and QP
 * type (if a QP is attached), port space, state, source/destination
 * addresses (if set), the CM ID object number and owner.
 *
 * Returns 0 on success or on a port mismatch (unlike fill_res_qp_entry,
 * which returns -EAGAIN for a mismatch — NOTE(review): verify the caller
 * handles a 0 return with no attributes written), or -EMSGSIZE.
 */
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return 0;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	/* Addresses are reported only once their family has been set. */
	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err: return -EMSGSIZE;
}
537b5fa635aSLeon Romanovsky 
/*
 * Dump one tracked CQ into @msg: CQE count, reference count, poll
 * context (kernel CQs only), the CQ object number, owning ucontext
 * number (user CQs only) and owner identification.
 *
 * Returns 0 on success, -EMSGSIZE when @msg is full.
 *
 * NOTE(review): @has_cap_net_admin and @port are unused here — the port
 * parameter exists for signature parity with the other fill_res_*_entry
 * callbacks; confirm against the dispatch table.
 */
static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		goto err;
	/*
	 * Assumes every user CQ has a valid uobject with a live context —
	 * NOTE(review): confirm this holds on all teardown paths.
	 */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}
572a34fc089SSteve Wise 
573659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
574fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
575fccec5b8SSteve Wise {
576fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
57702da3750SLeon Romanovsky 	struct ib_device *dev = mr->pd->device;
578fccec5b8SSteve Wise 
579659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
580fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
581fccec5b8SSteve Wise 			goto err;
582fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
583fccec5b8SSteve Wise 			goto err;
584fccec5b8SSteve Wise 	}
585fccec5b8SSteve Wise 
58625a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
58725a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
588fccec5b8SSteve Wise 		goto err;
589fccec5b8SSteve Wise 
590517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
591517b773eSLeon Romanovsky 		goto err;
592517b773eSLeon Romanovsky 
593c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
594c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
595c3d02788SLeon Romanovsky 		goto err;
596c3d02788SLeon Romanovsky 
597fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
598fccec5b8SSteve Wise 		goto err;
599fccec5b8SSteve Wise 
60002da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
601da5c8507SSteve Wise 		goto err;
602da5c8507SSteve Wise 
603fccec5b8SSteve Wise 	return 0;
604fccec5b8SSteve Wise 
605c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
606fccec5b8SSteve Wise }
607fccec5b8SSteve Wise 
608659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
60929cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
61029cf1351SSteve Wise {
61129cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
61202da3750SLeon Romanovsky 	struct ib_device *dev = pd->device;
61329cf1351SSteve Wise 
614659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
61529cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
61629cf1351SSteve Wise 				pd->local_dma_lkey))
61729cf1351SSteve Wise 			goto err;
61829cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
61929cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
62029cf1351SSteve Wise 				pd->unsafe_global_rkey))
62129cf1351SSteve Wise 			goto err;
62229cf1351SSteve Wise 	}
62329cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
62425a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
62529cf1351SSteve Wise 		goto err;
62629cf1351SSteve Wise 
627517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
628517b773eSLeon Romanovsky 		goto err;
629517b773eSLeon Romanovsky 
630c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
631c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
632c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
633c3d02788SLeon Romanovsky 		goto err;
634c3d02788SLeon Romanovsky 
63529cf1351SSteve Wise 	if (fill_res_name_pid(msg, res))
63629cf1351SSteve Wise 		goto err;
63729cf1351SSteve Wise 
63802da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
639da5c8507SSteve Wise 		goto err;
640da5c8507SSteve Wise 
64129cf1351SSteve Wise 	return 0;
64229cf1351SSteve Wise 
643c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
64429cf1351SSteve Wise }
64529cf1351SSteve Wise 
646c4ffee7cSMark Zhang static int fill_stat_counter_mode(struct sk_buff *msg,
647c4ffee7cSMark Zhang 				  struct rdma_counter *counter)
648c4ffee7cSMark Zhang {
649c4ffee7cSMark Zhang 	struct rdma_counter_mode *m = &counter->mode;
650c4ffee7cSMark Zhang 
651c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
652c4ffee7cSMark Zhang 		return -EMSGSIZE;
653c4ffee7cSMark Zhang 
654c4ffee7cSMark Zhang 	if (m->mode == RDMA_COUNTER_MODE_AUTO)
655c4ffee7cSMark Zhang 		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
656c4ffee7cSMark Zhang 		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
657c4ffee7cSMark Zhang 			return -EMSGSIZE;
658c4ffee7cSMark Zhang 
659c4ffee7cSMark Zhang 	return 0;
660c4ffee7cSMark Zhang }
661c4ffee7cSMark Zhang 
662c4ffee7cSMark Zhang static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
663c4ffee7cSMark Zhang {
664c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
665c4ffee7cSMark Zhang 
666c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
667c4ffee7cSMark Zhang 	if (!entry_attr)
668c4ffee7cSMark Zhang 		return -EMSGSIZE;
669c4ffee7cSMark Zhang 
670c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
671c4ffee7cSMark Zhang 		goto err;
672c4ffee7cSMark Zhang 
673c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
674c4ffee7cSMark Zhang 	return 0;
675c4ffee7cSMark Zhang 
676c4ffee7cSMark Zhang err:
677c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
678c4ffee7cSMark Zhang 	return -EMSGSIZE;
679c4ffee7cSMark Zhang }
680c4ffee7cSMark Zhang 
681c4ffee7cSMark Zhang static int fill_stat_counter_qps(struct sk_buff *msg,
682c4ffee7cSMark Zhang 				 struct rdma_counter *counter)
683c4ffee7cSMark Zhang {
684c4ffee7cSMark Zhang 	struct rdma_restrack_entry *res;
685c4ffee7cSMark Zhang 	struct rdma_restrack_root *rt;
686c4ffee7cSMark Zhang 	struct nlattr *table_attr;
687c4ffee7cSMark Zhang 	struct ib_qp *qp = NULL;
688c4ffee7cSMark Zhang 	unsigned long id = 0;
689c4ffee7cSMark Zhang 	int ret = 0;
690c4ffee7cSMark Zhang 
691c4ffee7cSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
692c4ffee7cSMark Zhang 
693c4ffee7cSMark Zhang 	rt = &counter->device->res[RDMA_RESTRACK_QP];
694c4ffee7cSMark Zhang 	xa_lock(&rt->xa);
695c4ffee7cSMark Zhang 	xa_for_each(&rt->xa, id, res) {
696c4ffee7cSMark Zhang 		if (!rdma_is_visible_in_pid_ns(res))
697c4ffee7cSMark Zhang 			continue;
698c4ffee7cSMark Zhang 
699c4ffee7cSMark Zhang 		qp = container_of(res, struct ib_qp, res);
700c4ffee7cSMark Zhang 		if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
701c4ffee7cSMark Zhang 			continue;
702c4ffee7cSMark Zhang 
703c4ffee7cSMark Zhang 		if (!qp->counter || (qp->counter->id != counter->id))
704c4ffee7cSMark Zhang 			continue;
705c4ffee7cSMark Zhang 
706c4ffee7cSMark Zhang 		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
707c4ffee7cSMark Zhang 		if (ret)
708c4ffee7cSMark Zhang 			goto err;
709c4ffee7cSMark Zhang 	}
710c4ffee7cSMark Zhang 
711c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
712c4ffee7cSMark Zhang 	nla_nest_end(msg, table_attr);
713c4ffee7cSMark Zhang 	return 0;
714c4ffee7cSMark Zhang 
715c4ffee7cSMark Zhang err:
716c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
717c4ffee7cSMark Zhang 	nla_nest_cancel(msg, table_attr);
718c4ffee7cSMark Zhang 	return ret;
719c4ffee7cSMark Zhang }
720c4ffee7cSMark Zhang 
721c4ffee7cSMark Zhang static int fill_stat_hwcounter_entry(struct sk_buff *msg,
722c4ffee7cSMark Zhang 				     const char *name, u64 value)
723c4ffee7cSMark Zhang {
724c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
725c4ffee7cSMark Zhang 
726c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
727c4ffee7cSMark Zhang 	if (!entry_attr)
728c4ffee7cSMark Zhang 		return -EMSGSIZE;
729c4ffee7cSMark Zhang 
730c4ffee7cSMark Zhang 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
731c4ffee7cSMark Zhang 			   name))
732c4ffee7cSMark Zhang 		goto err;
733c4ffee7cSMark Zhang 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
734c4ffee7cSMark Zhang 			      value, RDMA_NLDEV_ATTR_PAD))
735c4ffee7cSMark Zhang 		goto err;
736c4ffee7cSMark Zhang 
737c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
738c4ffee7cSMark Zhang 	return 0;
739c4ffee7cSMark Zhang 
740c4ffee7cSMark Zhang err:
741c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
742c4ffee7cSMark Zhang 	return -EMSGSIZE;
743c4ffee7cSMark Zhang }
744c4ffee7cSMark Zhang 
745c4ffee7cSMark Zhang static int fill_stat_counter_hwcounters(struct sk_buff *msg,
746c4ffee7cSMark Zhang 					struct rdma_counter *counter)
747c4ffee7cSMark Zhang {
748c4ffee7cSMark Zhang 	struct rdma_hw_stats *st = counter->stats;
749c4ffee7cSMark Zhang 	struct nlattr *table_attr;
750c4ffee7cSMark Zhang 	int i;
751c4ffee7cSMark Zhang 
752c4ffee7cSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
753c4ffee7cSMark Zhang 	if (!table_attr)
754c4ffee7cSMark Zhang 		return -EMSGSIZE;
755c4ffee7cSMark Zhang 
756c4ffee7cSMark Zhang 	for (i = 0; i < st->num_counters; i++)
757c4ffee7cSMark Zhang 		if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
758c4ffee7cSMark Zhang 			goto err;
759c4ffee7cSMark Zhang 
760c4ffee7cSMark Zhang 	nla_nest_end(msg, table_attr);
761c4ffee7cSMark Zhang 	return 0;
762c4ffee7cSMark Zhang 
763c4ffee7cSMark Zhang err:
764c4ffee7cSMark Zhang 	nla_nest_cancel(msg, table_attr);
765c4ffee7cSMark Zhang 	return -EMSGSIZE;
766c4ffee7cSMark Zhang }
767c4ffee7cSMark Zhang 
768c4ffee7cSMark Zhang static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
769c4ffee7cSMark Zhang 				  struct rdma_restrack_entry *res,
770c4ffee7cSMark Zhang 				  uint32_t port)
771c4ffee7cSMark Zhang {
772c4ffee7cSMark Zhang 	struct rdma_counter *counter =
773c4ffee7cSMark Zhang 		container_of(res, struct rdma_counter, res);
774c4ffee7cSMark Zhang 
775c4ffee7cSMark Zhang 	if (port && port != counter->port)
776c4ffee7cSMark Zhang 		return 0;
777c4ffee7cSMark Zhang 
778c4ffee7cSMark Zhang 	/* Dump it even query failed */
779c4ffee7cSMark Zhang 	rdma_counter_query_stats(counter);
780c4ffee7cSMark Zhang 
781c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
782c4ffee7cSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
783c4ffee7cSMark Zhang 	    fill_res_name_pid(msg, &counter->res) ||
784c4ffee7cSMark Zhang 	    fill_stat_counter_mode(msg, counter) ||
785c4ffee7cSMark Zhang 	    fill_stat_counter_qps(msg, counter) ||
786c4ffee7cSMark Zhang 	    fill_stat_counter_hwcounters(msg, counter))
787c4ffee7cSMark Zhang 		return -EMSGSIZE;
788c4ffee7cSMark Zhang 
789c4ffee7cSMark Zhang 	return 0;
790c4ffee7cSMark Zhang }
791c4ffee7cSMark Zhang 
792e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
793e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
794e5c9469eSLeon Romanovsky {
795e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
796e5c9469eSLeon Romanovsky 	struct ib_device *device;
797e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
798e5c9469eSLeon Romanovsky 	u32 index;
799e5c9469eSLeon Romanovsky 	int err;
800e5c9469eSLeon Romanovsky 
8018cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
802e5c9469eSLeon Romanovsky 				     nldev_policy, extack);
803e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
804e5c9469eSLeon Romanovsky 		return -EINVAL;
805e5c9469eSLeon Romanovsky 
806e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
807e5c9469eSLeon Romanovsky 
80837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
809e5c9469eSLeon Romanovsky 	if (!device)
810e5c9469eSLeon Romanovsky 		return -EINVAL;
811e5c9469eSLeon Romanovsky 
812e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
813f8978bd9SLeon Romanovsky 	if (!msg) {
814f8978bd9SLeon Romanovsky 		err = -ENOMEM;
815f8978bd9SLeon Romanovsky 		goto err;
816f8978bd9SLeon Romanovsky 	}
817e5c9469eSLeon Romanovsky 
818e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
819e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
820e5c9469eSLeon Romanovsky 			0, 0);
821e5c9469eSLeon Romanovsky 
822e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
823f8978bd9SLeon Romanovsky 	if (err)
824f8978bd9SLeon Romanovsky 		goto err_free;
825e5c9469eSLeon Romanovsky 
826e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
827e5c9469eSLeon Romanovsky 
82801b67117SParav Pandit 	ib_device_put(device);
829e5c9469eSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
830f8978bd9SLeon Romanovsky 
831f8978bd9SLeon Romanovsky err_free:
832f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
833f8978bd9SLeon Romanovsky err:
83401b67117SParav Pandit 	ib_device_put(device);
835f8978bd9SLeon Romanovsky 	return err;
836e5c9469eSLeon Romanovsky }
837e5c9469eSLeon Romanovsky 
/*
 * RDMA_NLDEV_CMD_SET doit handler: rename a device and/or move it to
 * another network namespace.
 *
 * Reference handling is asymmetric on purpose: the rename path drops
 * the device reference here (label "done"), while
 * ib_device_set_netns_put() consumes the reference itself — note the
 * "_put" suffix and the jump past ib_device_put() to "put_done".
 */
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		/* Consumes the device reference — skip ib_device_put(). */
		err = ib_device_set_netns_put(skb, device, ns_fd);
		goto put_done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
87805d940d3SLeon Romanovsky 
879b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
880b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
881b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
882b4c598a6SLeon Romanovsky 			     unsigned int idx)
883b4c598a6SLeon Romanovsky {
884b4c598a6SLeon Romanovsky 	int start = cb->args[0];
885b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
886b4c598a6SLeon Romanovsky 
887b4c598a6SLeon Romanovsky 	if (idx < start)
888b4c598a6SLeon Romanovsky 		return 0;
889b4c598a6SLeon Romanovsky 
890b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
891b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
892b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
893b4c598a6SLeon Romanovsky 
894b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
895b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
896b4c598a6SLeon Romanovsky 		goto out;
897b4c598a6SLeon Romanovsky 	}
898b4c598a6SLeon Romanovsky 
899b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
900b4c598a6SLeon Romanovsky 
901b4c598a6SLeon Romanovsky 	idx++;
902b4c598a6SLeon Romanovsky 
903b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
904b4c598a6SLeon Romanovsky 	return skb->len;
905b4c598a6SLeon Romanovsky }
906b4c598a6SLeon Romanovsky 
/* RDMA_NLDEV_CMD_GET dumpit handler: enumerate all registered devices. */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
915b4c598a6SLeon Romanovsky 
916c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
917c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
918c3f66f7bSLeon Romanovsky {
919c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
920c3f66f7bSLeon Romanovsky 	struct ib_device *device;
921c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
922c3f66f7bSLeon Romanovsky 	u32 index;
923c3f66f7bSLeon Romanovsky 	u32 port;
924c3f66f7bSLeon Romanovsky 	int err;
925c3f66f7bSLeon Romanovsky 
9268cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
927c3f66f7bSLeon Romanovsky 				     nldev_policy, extack);
928287683d0SLeon Romanovsky 	if (err ||
929287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
930287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
931c3f66f7bSLeon Romanovsky 		return -EINVAL;
932c3f66f7bSLeon Romanovsky 
933c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
93437eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
935c3f66f7bSLeon Romanovsky 	if (!device)
936c3f66f7bSLeon Romanovsky 		return -EINVAL;
937c3f66f7bSLeon Romanovsky 
938c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
939f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
940f8978bd9SLeon Romanovsky 		err = -EINVAL;
941f8978bd9SLeon Romanovsky 		goto err;
942f8978bd9SLeon Romanovsky 	}
943c3f66f7bSLeon Romanovsky 
944c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
945f8978bd9SLeon Romanovsky 	if (!msg) {
946f8978bd9SLeon Romanovsky 		err = -ENOMEM;
947f8978bd9SLeon Romanovsky 		goto err;
948f8978bd9SLeon Romanovsky 	}
949c3f66f7bSLeon Romanovsky 
950c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
951c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
952c3f66f7bSLeon Romanovsky 			0, 0);
953c3f66f7bSLeon Romanovsky 
9545b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
955f8978bd9SLeon Romanovsky 	if (err)
956f8978bd9SLeon Romanovsky 		goto err_free;
957c3f66f7bSLeon Romanovsky 
958c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
95901b67117SParav Pandit 	ib_device_put(device);
960c3f66f7bSLeon Romanovsky 
961c3f66f7bSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
962f8978bd9SLeon Romanovsky 
963f8978bd9SLeon Romanovsky err_free:
964f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
965f8978bd9SLeon Romanovsky err:
96601b67117SParav Pandit 	ib_device_put(device);
967f8978bd9SLeon Romanovsky 	return err;
968c3f66f7bSLeon Romanovsky }
969c3f66f7bSLeon Romanovsky 
9707d02f605SLeon Romanovsky static int nldev_port_get_dumpit(struct sk_buff *skb,
9717d02f605SLeon Romanovsky 				 struct netlink_callback *cb)
9727d02f605SLeon Romanovsky {
9737d02f605SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
9747d02f605SLeon Romanovsky 	struct ib_device *device;
9757d02f605SLeon Romanovsky 	int start = cb->args[0];
9767d02f605SLeon Romanovsky 	struct nlmsghdr *nlh;
9777d02f605SLeon Romanovsky 	u32 idx = 0;
9787d02f605SLeon Romanovsky 	u32 ifindex;
9797d02f605SLeon Romanovsky 	int err;
980ea1075edSJason Gunthorpe 	unsigned int p;
9817d02f605SLeon Romanovsky 
9828cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
9837d02f605SLeon Romanovsky 				     nldev_policy, NULL);
9847d02f605SLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
9857d02f605SLeon Romanovsky 		return -EINVAL;
9867d02f605SLeon Romanovsky 
9877d02f605SLeon Romanovsky 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
98837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
9897d02f605SLeon Romanovsky 	if (!device)
9907d02f605SLeon Romanovsky 		return -EINVAL;
9917d02f605SLeon Romanovsky 
992ea1075edSJason Gunthorpe 	rdma_for_each_port (device, p) {
9937d02f605SLeon Romanovsky 		/*
9947d02f605SLeon Romanovsky 		 * The dumpit function returns all information from specific
9957d02f605SLeon Romanovsky 		 * index. This specific index is taken from the netlink
9967d02f605SLeon Romanovsky 		 * messages request sent by user and it is available
9977d02f605SLeon Romanovsky 		 * in cb->args[0].
9987d02f605SLeon Romanovsky 		 *
9997d02f605SLeon Romanovsky 		 * Usually, the user doesn't fill this field and it causes
10007d02f605SLeon Romanovsky 		 * to return everything.
10017d02f605SLeon Romanovsky 		 *
10027d02f605SLeon Romanovsky 		 */
10037d02f605SLeon Romanovsky 		if (idx < start) {
10047d02f605SLeon Romanovsky 			idx++;
10057d02f605SLeon Romanovsky 			continue;
10067d02f605SLeon Romanovsky 		}
10077d02f605SLeon Romanovsky 
10087d02f605SLeon Romanovsky 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
10097d02f605SLeon Romanovsky 				cb->nlh->nlmsg_seq,
10107d02f605SLeon Romanovsky 				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
10117d02f605SLeon Romanovsky 						 RDMA_NLDEV_CMD_PORT_GET),
10127d02f605SLeon Romanovsky 				0, NLM_F_MULTI);
10137d02f605SLeon Romanovsky 
10145b2cc79dSLeon Romanovsky 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
10157d02f605SLeon Romanovsky 			nlmsg_cancel(skb, nlh);
10167d02f605SLeon Romanovsky 			goto out;
10177d02f605SLeon Romanovsky 		}
10187d02f605SLeon Romanovsky 		idx++;
10197d02f605SLeon Romanovsky 		nlmsg_end(skb, nlh);
10207d02f605SLeon Romanovsky 	}
10217d02f605SLeon Romanovsky 
1022f8978bd9SLeon Romanovsky out:
102301b67117SParav Pandit 	ib_device_put(device);
1024f8978bd9SLeon Romanovsky 	cb->args[0] = idx;
10257d02f605SLeon Romanovsky 	return skb->len;
10267d02f605SLeon Romanovsky }
10277d02f605SLeon Romanovsky 
1028bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1029bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
1030bf3c5a93SLeon Romanovsky {
1031bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1032bf3c5a93SLeon Romanovsky 	struct ib_device *device;
1033bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
1034bf3c5a93SLeon Romanovsky 	u32 index;
1035bf3c5a93SLeon Romanovsky 	int ret;
1036bf3c5a93SLeon Romanovsky 
10378cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1038bf3c5a93SLeon Romanovsky 				     nldev_policy, extack);
1039bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1040bf3c5a93SLeon Romanovsky 		return -EINVAL;
1041bf3c5a93SLeon Romanovsky 
1042bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
104337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1044bf3c5a93SLeon Romanovsky 	if (!device)
1045bf3c5a93SLeon Romanovsky 		return -EINVAL;
1046bf3c5a93SLeon Romanovsky 
1047bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1048f34727a1SDan Carpenter 	if (!msg) {
1049f34727a1SDan Carpenter 		ret = -ENOMEM;
1050bf3c5a93SLeon Romanovsky 		goto err;
1051f34727a1SDan Carpenter 	}
1052bf3c5a93SLeon Romanovsky 
1053bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1054bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1055bf3c5a93SLeon Romanovsky 			0, 0);
1056bf3c5a93SLeon Romanovsky 
1057bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
1058bf3c5a93SLeon Romanovsky 	if (ret)
1059bf3c5a93SLeon Romanovsky 		goto err_free;
1060bf3c5a93SLeon Romanovsky 
1061bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
106201b67117SParav Pandit 	ib_device_put(device);
1063bf3c5a93SLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1064bf3c5a93SLeon Romanovsky 
1065bf3c5a93SLeon Romanovsky err_free:
1066bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
1067bf3c5a93SLeon Romanovsky err:
106801b67117SParav Pandit 	ib_device_put(device);
1069bf3c5a93SLeon Romanovsky 	return ret;
1070bf3c5a93SLeon Romanovsky }
1071bf3c5a93SLeon Romanovsky 
1072bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
1073bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
1074bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
1075bf3c5a93SLeon Romanovsky 				 unsigned int idx)
1076bf3c5a93SLeon Romanovsky {
1077bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
1078bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
1079bf3c5a93SLeon Romanovsky 
1080bf3c5a93SLeon Romanovsky 	if (idx < start)
1081bf3c5a93SLeon Romanovsky 		return 0;
1082bf3c5a93SLeon Romanovsky 
1083bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1084bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1085bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
1086bf3c5a93SLeon Romanovsky 
1087bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
1088bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
1089bf3c5a93SLeon Romanovsky 		goto out;
1090bf3c5a93SLeon Romanovsky 	}
1091bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
1092bf3c5a93SLeon Romanovsky 
1093bf3c5a93SLeon Romanovsky 	idx++;
1094bf3c5a93SLeon Romanovsky 
1095bf3c5a93SLeon Romanovsky out:
1096bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
1097bf3c5a93SLeon Romanovsky 	return skb->len;
1098bf3c5a93SLeon Romanovsky }
1099bf3c5a93SLeon Romanovsky 
/* RDMA_NLDEV_CMD_RES_GET dumpit handler: summary for every device. */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
1105bf3c5a93SLeon Romanovsky 
/*
 * Per-restrack-type dispatch record used by the RES_* get handlers to
 * map a resource type to its netlink command, attributes and fill
 * callback.
 */
struct nldev_fill_res_entry {
	/* Callback that serializes one restrack entry into @msg. */
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	/* Nested table attribute holding all entries of this type. */
	enum rdma_nldev_attr nldev_attr;
	/* Netlink command serviced by this record. */
	enum rdma_nldev_command nldev_cmd;
	/* Bitmask of enum nldev_res_flags. */
	u8 flags;
	/* Attribute type of a single nested entry. */
	u32 entry;
	/* Attribute carrying the resource's unique id (for doit lookup). */
	u32 id;
};

enum nldev_res_flags {
	/* Resource is tracked per device rather than per port. */
	NLDEV_PER_DEV = 1 << 0,
};
1119d12ff624SSteve Wise 
/*
 * Dispatch table indexed by restrack type; consumed by the common
 * res_get doit/dumpit paths to pick command, attributes and fill
 * callback for each resource kind.
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.fill_res_func = fill_res_counter_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
};
1167d12ff624SSteve Wise 
1168c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1169c5dfe0eaSLeon Romanovsky 			       struct netlink_ext_ack *extack,
1170c5dfe0eaSLeon Romanovsky 			       enum rdma_restrack_type res_type)
1171c5dfe0eaSLeon Romanovsky {
1172c5dfe0eaSLeon Romanovsky 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1173c5dfe0eaSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1174c5dfe0eaSLeon Romanovsky 	struct rdma_restrack_entry *res;
1175c5dfe0eaSLeon Romanovsky 	struct ib_device *device;
1176c5dfe0eaSLeon Romanovsky 	u32 index, id, port = 0;
1177c5dfe0eaSLeon Romanovsky 	bool has_cap_net_admin;
1178c5dfe0eaSLeon Romanovsky 	struct sk_buff *msg;
1179c5dfe0eaSLeon Romanovsky 	int ret;
1180c5dfe0eaSLeon Romanovsky 
11818cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1182c5dfe0eaSLeon Romanovsky 				     nldev_policy, extack);
1183c5dfe0eaSLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
1184c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1185c5dfe0eaSLeon Romanovsky 
1186c5dfe0eaSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
118737eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1188c5dfe0eaSLeon Romanovsky 	if (!device)
1189c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1190c5dfe0eaSLeon Romanovsky 
1191c5dfe0eaSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1192c5dfe0eaSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1193c5dfe0eaSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1194c5dfe0eaSLeon Romanovsky 			ret = -EINVAL;
1195c5dfe0eaSLeon Romanovsky 			goto err;
1196c5dfe0eaSLeon Romanovsky 		}
1197c5dfe0eaSLeon Romanovsky 	}
1198c5dfe0eaSLeon Romanovsky 
1199c5dfe0eaSLeon Romanovsky 	if ((port && fe->flags & NLDEV_PER_DEV) ||
1200c5dfe0eaSLeon Romanovsky 	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
1201c5dfe0eaSLeon Romanovsky 		ret = -EINVAL;
1202c5dfe0eaSLeon Romanovsky 		goto err;
1203c5dfe0eaSLeon Romanovsky 	}
1204c5dfe0eaSLeon Romanovsky 
1205c5dfe0eaSLeon Romanovsky 	id = nla_get_u32(tb[fe->id]);
1206c5dfe0eaSLeon Romanovsky 	res = rdma_restrack_get_byid(device, res_type, id);
1207c5dfe0eaSLeon Romanovsky 	if (IS_ERR(res)) {
1208c5dfe0eaSLeon Romanovsky 		ret = PTR_ERR(res);
1209c5dfe0eaSLeon Romanovsky 		goto err;
1210c5dfe0eaSLeon Romanovsky 	}
1211c5dfe0eaSLeon Romanovsky 
12126a6c306aSMark Zhang 	if (!rdma_is_visible_in_pid_ns(res)) {
1213c5dfe0eaSLeon Romanovsky 		ret = -ENOENT;
1214c5dfe0eaSLeon Romanovsky 		goto err_get;
1215c5dfe0eaSLeon Romanovsky 	}
1216c5dfe0eaSLeon Romanovsky 
1217c5dfe0eaSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1218c5dfe0eaSLeon Romanovsky 	if (!msg) {
1219c5dfe0eaSLeon Romanovsky 		ret = -ENOMEM;
1220c5dfe0eaSLeon Romanovsky 		goto err;
1221c5dfe0eaSLeon Romanovsky 	}
1222c5dfe0eaSLeon Romanovsky 
1223c5dfe0eaSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1224c5dfe0eaSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
1225c5dfe0eaSLeon Romanovsky 			0, 0);
1226c5dfe0eaSLeon Romanovsky 
1227c5dfe0eaSLeon Romanovsky 	if (fill_nldev_handle(msg, device)) {
1228c5dfe0eaSLeon Romanovsky 		ret = -EMSGSIZE;
1229c5dfe0eaSLeon Romanovsky 		goto err_free;
1230c5dfe0eaSLeon Romanovsky 	}
1231c5dfe0eaSLeon Romanovsky 
1232c5dfe0eaSLeon Romanovsky 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1233c5dfe0eaSLeon Romanovsky 	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
1234c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1235c5dfe0eaSLeon Romanovsky 	if (ret)
1236c5dfe0eaSLeon Romanovsky 		goto err_free;
1237c5dfe0eaSLeon Romanovsky 
1238c5dfe0eaSLeon Romanovsky 	nlmsg_end(msg, nlh);
1239c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1240c5dfe0eaSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1241c5dfe0eaSLeon Romanovsky 
1242c5dfe0eaSLeon Romanovsky err_free:
1243c5dfe0eaSLeon Romanovsky 	nlmsg_free(msg);
1244c5dfe0eaSLeon Romanovsky err_get:
1245c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1246c5dfe0eaSLeon Romanovsky err:
1247c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1248c5dfe0eaSLeon Romanovsky 	return ret;
1249c5dfe0eaSLeon Romanovsky }
1250c5dfe0eaSLeon Romanovsky 
/*
 * Common dumpit handler for RDMA_NLDEV_CMD_RES_*_GET: walk every restrack
 * object of @res_type on one device and emit one nested attribute per
 * object.  cb->args[0] carries the resume index between dump invocations,
 * so a dump that fills the skb picks up where it left off.
 *
 * Locking: rt->xa is locked while iterating, but the lock is dropped
 * around the fill callback (which may sleep); the object is pinned with
 * rdma_restrack_get() across the unlocked section.
 */
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];	/* resume point from previous dump pass */
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;	/* did at least one entry make it into this skb? */

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * if it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	/* Nest that will hold one entry attribute per object */
	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if the skip ahead is something common this loop should
	 * use xas_for_each & xas_pause to optimize, we can have a lot of
	 * objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_is_visible_in_pid_ns(res))
			continue;

		/* Skip entries already dumped; get() fails on dying objects */
		if (idx < start || !rdma_restrack_get(res))
			goto next;

		/* Drop the lock across the (possibly sleeping) fill callback */
		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start_noflag(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;	/* skb full: finish this pass */
			if (ret == -EAGAIN)
				goto again;	/* entry gone: skip, keep going */
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;	/* remember where to resume next pass */

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}
1383b5fa635aSLeon Romanovsky 
/*
 * Generate per-resource-type netlink handlers.  Each expansion produces a
 * doit and a dumpit wrapper that simply forward to the common
 * implementations above with the matching restrack type.
 */
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type);                   \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type);            \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
140329cf1351SSteve Wise 
14043856ec4bSSteve Wise static LIST_HEAD(link_ops);
14053856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem);
14063856ec4bSSteve Wise 
14073856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
14083856ec4bSSteve Wise {
14093856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
14103856ec4bSSteve Wise 
14113856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
14123856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
14133856ec4bSSteve Wise 			goto out;
14143856ec4bSSteve Wise 	}
14153856ec4bSSteve Wise 	ops = NULL;
14163856ec4bSSteve Wise out:
14173856ec4bSSteve Wise 	return ops;
14183856ec4bSSteve Wise }
14193856ec4bSSteve Wise 
14203856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
14213856ec4bSSteve Wise {
14223856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1423afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
14243856ec4bSSteve Wise 		goto out;
14253856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
14263856ec4bSSteve Wise out:
14273856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
14283856ec4bSSteve Wise }
14293856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
14303856ec4bSSteve Wise 
/*
 * Remove a dynamic-link provider from the registry.  Taking the rwsem for
 * write also waits out any concurrent lookup/newlink holding it for read.
 */
void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
14383856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_NEWLINK handler: create a soft RDMA device (e.g. rxe/siw)
 * named DEV_NAME of type LINK_TYPE on top of netdev NDEV_NAME.  The actual
 * creation is delegated to the matching rdma_link_ops->newlink().
 */
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	/* '%' would trigger kernel name templating; not allowed from users */
	if (strchr(ibdev_name, '%'))
		return -EINVAL;

	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	/* Holds a reference on the netdev until after newlink() returns */
	ndev = dev_get_by_name(&init_net, ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	/*
	 * Unknown type: drop the lock, try to autoload the provider module
	 * ("rdma-link-<type>"), then look the type up again.
	 */
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}
14853856ec4bSSteve Wise 
14863856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
14873856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
14883856ec4bSSteve Wise {
14893856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
14903856ec4bSSteve Wise 	struct ib_device *device;
14913856ec4bSSteve Wise 	u32 index;
14923856ec4bSSteve Wise 	int err;
14933856ec4bSSteve Wise 
14948cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
14953856ec4bSSteve Wise 				     nldev_policy, extack);
14963856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
14973856ec4bSSteve Wise 		return -EINVAL;
14983856ec4bSSteve Wise 
14993856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
150037eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
15013856ec4bSSteve Wise 	if (!device)
15023856ec4bSSteve Wise 		return -EINVAL;
15033856ec4bSSteve Wise 
15043856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
15053856ec4bSSteve Wise 		ib_device_put(device);
15063856ec4bSSteve Wise 		return -EINVAL;
15073856ec4bSSteve Wise 	}
15083856ec4bSSteve Wise 
15093856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
15103856ec4bSSteve Wise 	return 0;
15113856ec4bSSteve Wise }
15123856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_GET_CHARDEV handler: report the char device (dev_t, name
 * and ABI version) that the named client exposes, optionally scoped to a
 * specific ib_device and port.  Without DEV_INDEX the query is global
 * (ibdev stays NULL); PORT_INDEX alone is rejected.
 */
static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			/* -1 means "whole device", no specific port */
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);

	/* On success data.cdev holds a device reference we must drop */
	data.nl_msg = msg;
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}
15960e2d00ebSJason Gunthorpe 
15974d7ba8ceSParav Pandit static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
15984d7ba8ceSParav Pandit 			      struct netlink_ext_ack *extack)
1599cb7e0e13SParav Pandit {
1600cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
16014d7ba8ceSParav Pandit 	struct sk_buff *msg;
1602cb7e0e13SParav Pandit 	int err;
1603cb7e0e13SParav Pandit 
16044d7ba8ceSParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
16054d7ba8ceSParav Pandit 			  nldev_policy, extack);
1606cb7e0e13SParav Pandit 	if (err)
1607cb7e0e13SParav Pandit 		return err;
1608cb7e0e13SParav Pandit 
16094d7ba8ceSParav Pandit 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
16104d7ba8ceSParav Pandit 	if (!msg)
16114d7ba8ceSParav Pandit 		return -ENOMEM;
16124d7ba8ceSParav Pandit 
16134d7ba8ceSParav Pandit 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1614cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1615cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1616cb7e0e13SParav Pandit 			0, 0);
1617cb7e0e13SParav Pandit 
16184d7ba8ceSParav Pandit 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1619cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1620cb7e0e13SParav Pandit 	if (err) {
16214d7ba8ceSParav Pandit 		nlmsg_free(msg);
1622cb7e0e13SParav Pandit 		return err;
1623cb7e0e13SParav Pandit 	}
16244d7ba8ceSParav Pandit 	nlmsg_end(msg, nlh);
16254d7ba8ceSParav Pandit 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1626cb7e0e13SParav Pandit }
1627cb7e0e13SParav Pandit 
16282b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
16292b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
16302b34c558SParav Pandit {
16312b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
16322b34c558SParav Pandit 	u8 enable;
16332b34c558SParav Pandit 	int err;
16342b34c558SParav Pandit 
16352b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
16362b34c558SParav Pandit 			  nldev_policy, extack);
16372b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
16382b34c558SParav Pandit 		return -EINVAL;
16392b34c558SParav Pandit 
16402b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
16412b34c558SParav Pandit 	/* Only 0 and 1 are supported */
16422b34c558SParav Pandit 	if (enable > 1)
16432b34c558SParav Pandit 		return -EINVAL;
16442b34c558SParav Pandit 
16452b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
16462b34c558SParav Pandit 	return err;
16472b34c558SParav Pandit }
16482b34c558SParav Pandit 
1649b47ae6f8SMark Zhang static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1650b47ae6f8SMark Zhang 			       struct netlink_ext_ack *extack)
1651b47ae6f8SMark Zhang {
1652b389327dSMark Zhang 	u32 index, port, mode, mask = 0, qpn, cntn = 0;
1653b47ae6f8SMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1654b47ae6f8SMark Zhang 	struct ib_device *device;
1655b47ae6f8SMark Zhang 	struct sk_buff *msg;
1656b47ae6f8SMark Zhang 	int ret;
1657b47ae6f8SMark Zhang 
1658b47ae6f8SMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1659b47ae6f8SMark Zhang 			  nldev_policy, extack);
1660b47ae6f8SMark Zhang 	/* Currently only counter for QP is supported */
1661b47ae6f8SMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1662b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1663b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
1664b47ae6f8SMark Zhang 		return -EINVAL;
1665b47ae6f8SMark Zhang 
1666b47ae6f8SMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1667b47ae6f8SMark Zhang 		return -EINVAL;
1668b47ae6f8SMark Zhang 
1669b47ae6f8SMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1670b47ae6f8SMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1671b47ae6f8SMark Zhang 	if (!device)
1672b47ae6f8SMark Zhang 		return -EINVAL;
1673b47ae6f8SMark Zhang 
1674b47ae6f8SMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1675b47ae6f8SMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1676b47ae6f8SMark Zhang 		ret = -EINVAL;
1677b47ae6f8SMark Zhang 		goto err;
1678b47ae6f8SMark Zhang 	}
1679b47ae6f8SMark Zhang 
1680b47ae6f8SMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1681b47ae6f8SMark Zhang 	if (!msg) {
1682b47ae6f8SMark Zhang 		ret = -ENOMEM;
1683b47ae6f8SMark Zhang 		goto err;
1684b47ae6f8SMark Zhang 	}
1685b47ae6f8SMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1686b47ae6f8SMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1687b47ae6f8SMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
1688b47ae6f8SMark Zhang 			0, 0);
1689b47ae6f8SMark Zhang 
1690b47ae6f8SMark Zhang 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
1691b389327dSMark Zhang 	if (mode == RDMA_COUNTER_MODE_AUTO) {
1692b47ae6f8SMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
1693b389327dSMark Zhang 			mask = nla_get_u32(
1694b389327dSMark Zhang 				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
1695b47ae6f8SMark Zhang 
1696b47ae6f8SMark Zhang 		ret = rdma_counter_set_auto_mode(device, port,
1697b47ae6f8SMark Zhang 						 mask ? true : false, mask);
1698b47ae6f8SMark Zhang 		if (ret)
1699b47ae6f8SMark Zhang 			goto err_msg;
1700b389327dSMark Zhang 	} else {
1701b389327dSMark Zhang 		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
1702b389327dSMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
1703b389327dSMark Zhang 			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
1704b389327dSMark Zhang 			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
1705b389327dSMark Zhang 		} else {
1706b389327dSMark Zhang 			ret = rdma_counter_bind_qpn_alloc(device, port,
1707b389327dSMark Zhang 							  qpn, &cntn);
1708b389327dSMark Zhang 		}
1709b389327dSMark Zhang 		if (ret)
1710b47ae6f8SMark Zhang 			goto err_msg;
1711b389327dSMark Zhang 
1712b389327dSMark Zhang 		if (fill_nldev_handle(msg, device) ||
1713b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1714b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
1715b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
1716b389327dSMark Zhang 			ret = -EMSGSIZE;
1717b389327dSMark Zhang 			goto err_fill;
1718b389327dSMark Zhang 		}
1719b47ae6f8SMark Zhang 	}
1720b47ae6f8SMark Zhang 
1721b47ae6f8SMark Zhang 	nlmsg_end(msg, nlh);
1722b47ae6f8SMark Zhang 	ib_device_put(device);
1723b47ae6f8SMark Zhang 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1724b47ae6f8SMark Zhang 
1725b389327dSMark Zhang err_fill:
1726b389327dSMark Zhang 	rdma_counter_unbind_qpn(device, port, qpn, cntn);
1727b47ae6f8SMark Zhang err_msg:
1728b47ae6f8SMark Zhang 	nlmsg_free(msg);
1729b47ae6f8SMark Zhang err:
1730b47ae6f8SMark Zhang 	ib_device_put(device);
1731b47ae6f8SMark Zhang 	return ret;
1732b47ae6f8SMark Zhang }
1733b47ae6f8SMark Zhang 
/*
 * RDMA_NLDEV_CMD_STAT_DEL handler: unbind the QP given by RES_LQPN from
 * the counter given by STAT_COUNTER_ID on a device/port and confirm the
 * unbinding in a unicast reply.  All four attributes are mandatory.
 */
static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port, qpn, cntn;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	/* Only QP counters exist so far */
	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);

	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
	if (ret)
		goto err_unbind;

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_fill:
	/* Reply didn't fit: re-establish the binding we removed above */
	rdma_counter_bind_qpn(device, port, qpn, cntn);
err_unbind:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
1801b389327dSMark Zhang 
1802c4ffee7cSMark Zhang static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1803c4ffee7cSMark Zhang 			       struct netlink_ext_ack *extack)
1804c4ffee7cSMark Zhang {
1805c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1806c4ffee7cSMark Zhang 	int ret;
1807c4ffee7cSMark Zhang 
1808c4ffee7cSMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1809c4ffee7cSMark Zhang 			  nldev_policy, extack);
1810c4ffee7cSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
1811c4ffee7cSMark Zhang 		return -EINVAL;
1812c4ffee7cSMark Zhang 
1813c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
1814c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
1815c4ffee7cSMark Zhang 		ret = nldev_res_get_counter_doit(skb, nlh, extack);
1816c4ffee7cSMark Zhang 		break;
1817c4ffee7cSMark Zhang 
1818c4ffee7cSMark Zhang 	default:
1819c4ffee7cSMark Zhang 		ret = -EINVAL;
1820c4ffee7cSMark Zhang 		break;
1821c4ffee7cSMark Zhang 	}
1822c4ffee7cSMark Zhang 
1823c4ffee7cSMark Zhang 	return ret;
1824c4ffee7cSMark Zhang }
1825c4ffee7cSMark Zhang 
1826c4ffee7cSMark Zhang static int nldev_stat_get_dumpit(struct sk_buff *skb,
1827c4ffee7cSMark Zhang 				 struct netlink_callback *cb)
1828c4ffee7cSMark Zhang {
1829c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1830c4ffee7cSMark Zhang 	int ret;
1831c4ffee7cSMark Zhang 
1832c4ffee7cSMark Zhang 	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1833c4ffee7cSMark Zhang 			  nldev_policy, NULL);
1834c4ffee7cSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
1835c4ffee7cSMark Zhang 		return -EINVAL;
1836c4ffee7cSMark Zhang 
1837c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
1838c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
1839c4ffee7cSMark Zhang 		ret = nldev_res_get_counter_dumpit(skb, cb);
1840c4ffee7cSMark Zhang 		break;
1841c4ffee7cSMark Zhang 
1842c4ffee7cSMark Zhang 	default:
1843c4ffee7cSMark Zhang 		ret = -EINVAL;
1844c4ffee7cSMark Zhang 		break;
1845c4ffee7cSMark Zhang 	}
1846c4ffee7cSMark Zhang 
1847c4ffee7cSMark Zhang 	return ret;
1848c4ffee7cSMark Zhang }
1849c4ffee7cSMark Zhang 
1850d0e312feSLeon Romanovsky static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
1851b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_CMD_GET] = {
1852e5c9469eSLeon Romanovsky 		.doit = nldev_get_doit,
1853b4c598a6SLeon Romanovsky 		.dump = nldev_get_dumpit,
1854b4c598a6SLeon Romanovsky 	},
18550e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
18560e2d00ebSJason Gunthorpe 		.doit = nldev_get_chardev,
18570e2d00ebSJason Gunthorpe 	},
185805d940d3SLeon Romanovsky 	[RDMA_NLDEV_CMD_SET] = {
185905d940d3SLeon Romanovsky 		.doit = nldev_set_doit,
186005d940d3SLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
186105d940d3SLeon Romanovsky 	},
18623856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_NEWLINK] = {
18633856ec4bSSteve Wise 		.doit = nldev_newlink,
18643856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
18653856ec4bSSteve Wise 	},
18663856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_DELLINK] = {
18673856ec4bSSteve Wise 		.doit = nldev_dellink,
18683856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
18693856ec4bSSteve Wise 	},
18707d02f605SLeon Romanovsky 	[RDMA_NLDEV_CMD_PORT_GET] = {
1871c3f66f7bSLeon Romanovsky 		.doit = nldev_port_get_doit,
18727d02f605SLeon Romanovsky 		.dump = nldev_port_get_dumpit,
18737d02f605SLeon Romanovsky 	},
1874bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_GET] = {
1875bf3c5a93SLeon Romanovsky 		.doit = nldev_res_get_doit,
1876bf3c5a93SLeon Romanovsky 		.dump = nldev_res_get_dumpit,
1877bf3c5a93SLeon Romanovsky 	},
1878b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
1879c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_qp_doit,
1880b5fa635aSLeon Romanovsky 		.dump = nldev_res_get_qp_dumpit,
1881b5fa635aSLeon Romanovsky 	},
188200313983SSteve Wise 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
1883c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cm_id_doit,
188400313983SSteve Wise 		.dump = nldev_res_get_cm_id_dumpit,
188500313983SSteve Wise 	},
1886a34fc089SSteve Wise 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
1887c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cq_doit,
1888a34fc089SSteve Wise 		.dump = nldev_res_get_cq_dumpit,
1889a34fc089SSteve Wise 	},
1890fccec5b8SSteve Wise 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
1891c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_mr_doit,
1892fccec5b8SSteve Wise 		.dump = nldev_res_get_mr_dumpit,
1893fccec5b8SSteve Wise 	},
189429cf1351SSteve Wise 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
1895c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_pd_doit,
189629cf1351SSteve Wise 		.dump = nldev_res_get_pd_dumpit,
189729cf1351SSteve Wise 	},
1898cb7e0e13SParav Pandit 	[RDMA_NLDEV_CMD_SYS_GET] = {
18994d7ba8ceSParav Pandit 		.doit = nldev_sys_get_doit,
1900cb7e0e13SParav Pandit 	},
19012b34c558SParav Pandit 	[RDMA_NLDEV_CMD_SYS_SET] = {
19022b34c558SParav Pandit 		.doit = nldev_set_sys_set_doit,
1903b47ae6f8SMark Zhang 	},
1904b47ae6f8SMark Zhang 	[RDMA_NLDEV_CMD_STAT_SET] = {
1905b47ae6f8SMark Zhang 		.doit = nldev_stat_set_doit,
19062b34c558SParav Pandit 		.flags = RDMA_NL_ADMIN_PERM,
19072b34c558SParav Pandit 	},
1908c4ffee7cSMark Zhang 	[RDMA_NLDEV_CMD_STAT_GET] = {
1909c4ffee7cSMark Zhang 		.doit = nldev_stat_get_doit,
1910c4ffee7cSMark Zhang 		.dump = nldev_stat_get_dumpit,
1911c4ffee7cSMark Zhang 	},
1912b389327dSMark Zhang 	[RDMA_NLDEV_CMD_STAT_DEL] = {
1913b389327dSMark Zhang 		.doit = nldev_stat_del_doit,
1914b389327dSMark Zhang 		.flags = RDMA_NL_ADMIN_PERM,
1915b389327dSMark Zhang 	},
1916b4c598a6SLeon Romanovsky };
1917b4c598a6SLeon Romanovsky 
19186c80b41aSLeon Romanovsky void __init nldev_init(void)
19196c80b41aSLeon Romanovsky {
1920b4c598a6SLeon Romanovsky 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
19216c80b41aSLeon Romanovsky }
19226c80b41aSLeon Romanovsky 
19236c80b41aSLeon Romanovsky void __exit nldev_exit(void)
19246c80b41aSLeon Romanovsky {
19256c80b41aSLeon Romanovsky 	rdma_nl_unregister(RDMA_NL_NLDEV);
19266c80b41aSLeon Romanovsky }
1927e3bf14bdSJason Gunthorpe 
1928e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
1929