xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision b47ae6f8)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
446c80b41aSLeon Romanovsky 
45696de2e9SDoug Ledford /*
46696de2e9SDoug Ledford  * Sort array elements by the netlink attribute name
47696de2e9SDoug Ledford  */
48b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
49696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
50696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
51696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
5234d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
53696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
5434d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
55b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
56b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
5734d65cd8SDoug Ledford 					.len = IB_DEVICE_NAME_MAX },
58696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
59696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
6034d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
61696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
62696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
63696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
64696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
6534d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
66696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
67696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
68696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
69696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
708621a7e3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
7134d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
7280a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
73696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
7434d65cd8SDoug Ledford 					.len = IFNAMSIZ },
7534840feaSLeon Romanovsky 	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
765b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
775b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
785b2cc79dSLeon Romanovsky 					.len = IFNAMSIZ },
79696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
80696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
81696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
82696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
83696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
84517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
85696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
86696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
87696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
88696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
89696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
90c3d02788SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
91696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
92696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
93696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
94696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
9534d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
96696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
97696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
98696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
99696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
100696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
101696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
102696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
103696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
104696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
105696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
106696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
107696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
108696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
109696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
110696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
111696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
112696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
113696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
114696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
115696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
116696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
117696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
118696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
119696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
120696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
121696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
122696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
12334d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
124696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
125696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
126696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
127696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
128696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
129b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
130b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
131b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
132696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
1338f71bb00SJason Gunthorpe 	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
134696de2e9SDoug Ledford 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
135696de2e9SDoug Ledford 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
136b4c598a6SLeon Romanovsky };
137b4c598a6SLeon Romanovsky 
13873937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
13973937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
14073937e8aSSteve Wise {
14173937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
14273937e8aSSteve Wise 		return -EMSGSIZE;
14373937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
14473937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
14573937e8aSSteve Wise 		return -EMSGSIZE;
14673937e8aSSteve Wise 
14773937e8aSSteve Wise 	return 0;
14873937e8aSSteve Wise }
14973937e8aSSteve Wise 
15073937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
15173937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
15273937e8aSSteve Wise 				   u32 value)
15373937e8aSSteve Wise {
15473937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
15573937e8aSSteve Wise 		return -EMSGSIZE;
15673937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
15773937e8aSSteve Wise 		return -EMSGSIZE;
15873937e8aSSteve Wise 
15973937e8aSSteve Wise 	return 0;
16073937e8aSSteve Wise }
16173937e8aSSteve Wise 
16273937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
16373937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
16473937e8aSSteve Wise 				   u64 value)
16573937e8aSSteve Wise {
16673937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
16773937e8aSSteve Wise 		return -EMSGSIZE;
16873937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
16973937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
17073937e8aSSteve Wise 		return -EMSGSIZE;
17173937e8aSSteve Wise 
17273937e8aSSteve Wise 	return 0;
17373937e8aSSteve Wise }
17473937e8aSSteve Wise 
/**
 * rdma_nl_put_driver_u32() - add a named u32 driver attribute to a message
 * @msg: netlink message being built
 * @name: attribute name shown to userspace
 * @value: value to report, rendered in the default (decimal) form
 *
 * Return: 0 on success, -EMSGSIZE if @msg has no room left.
 */
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);
18173937e8aSSteve Wise 
/**
 * rdma_nl_put_driver_u32_hex() - add a named u32 driver attribute (hex)
 * @msg: netlink message being built
 * @name: attribute name shown to userspace
 * @value: value to report, flagged for hexadecimal rendering
 *
 * Return: 0 on success, -EMSGSIZE if @msg has no room left.
 */
int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
18973937e8aSSteve Wise 
/**
 * rdma_nl_put_driver_u64() - add a named u64 driver attribute to a message
 * @msg: netlink message being built
 * @name: attribute name shown to userspace
 * @value: value to report, rendered in the default (decimal) form
 *
 * Return: 0 on success, -EMSGSIZE if @msg has no room left.
 */
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);
19673937e8aSSteve Wise 
/**
 * rdma_nl_put_driver_u64_hex() - add a named u64 driver attribute (hex)
 * @msg: netlink message being built
 * @name: attribute name shown to userspace
 * @value: value to report, flagged for hexadecimal rendering
 *
 * Return: 0 on success, -EMSGSIZE if @msg has no room left.
 */
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
20373937e8aSSteve Wise 
204c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
205b4c598a6SLeon Romanovsky {
206b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
207b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
208896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
209896de009SJason Gunthorpe 			   dev_name(&device->dev)))
210b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
211c2409810SLeon Romanovsky 
212c2409810SLeon Romanovsky 	return 0;
213c2409810SLeon Romanovsky }
214c2409810SLeon Romanovsky 
215c2409810SLeon Romanovsky static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
216c2409810SLeon Romanovsky {
217c2409810SLeon Romanovsky 	char fw[IB_FW_VERSION_NAME_MAX];
2189e886b39SLeon Romanovsky 	int ret = 0;
2199e886b39SLeon Romanovsky 	u8 port;
220c2409810SLeon Romanovsky 
221c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
222c2409810SLeon Romanovsky 		return -EMSGSIZE;
223c2409810SLeon Romanovsky 
224b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
225b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
226ac505253SLeon Romanovsky 
227ac505253SLeon Romanovsky 	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
228ac505253SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
22925a0ad85SSteve Wise 			      device->attrs.device_cap_flags,
23025a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
231ac505253SLeon Romanovsky 		return -EMSGSIZE;
232ac505253SLeon Romanovsky 
2338621a7e3SLeon Romanovsky 	ib_get_device_fw_str(device, fw);
2345b2cc79dSLeon Romanovsky 	/* Device without FW has strlen(fw) = 0 */
2358621a7e3SLeon Romanovsky 	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
2368621a7e3SLeon Romanovsky 		return -EMSGSIZE;
2378621a7e3SLeon Romanovsky 
2381aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
23925a0ad85SSteve Wise 			      be64_to_cpu(device->node_guid),
24025a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2411aaff896SLeon Romanovsky 		return -EMSGSIZE;
2421aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
24325a0ad85SSteve Wise 			      be64_to_cpu(device->attrs.sys_image_guid),
24425a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2451aaff896SLeon Romanovsky 		return -EMSGSIZE;
2461bb77b8cSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
2471bb77b8cSLeon Romanovsky 		return -EMSGSIZE;
2489e886b39SLeon Romanovsky 
2499e886b39SLeon Romanovsky 	/*
2509e886b39SLeon Romanovsky 	 * Link type is determined on first port and mlx4 device
2519e886b39SLeon Romanovsky 	 * which can potentially have two different link type for the same
2529e886b39SLeon Romanovsky 	 * IB device is considered as better to be avoided in the future,
2539e886b39SLeon Romanovsky 	 */
2549e886b39SLeon Romanovsky 	port = rdma_start_port(device);
2559e886b39SLeon Romanovsky 	if (rdma_cap_opa_mad(device, port))
2569e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
2579e886b39SLeon Romanovsky 	else if (rdma_protocol_ib(device, port))
2589e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
2599e886b39SLeon Romanovsky 	else if (rdma_protocol_iwarp(device, port))
2609e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
2619e886b39SLeon Romanovsky 	else if (rdma_protocol_roce(device, port))
2629e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
2639e886b39SLeon Romanovsky 	else if (rdma_protocol_usnic(device, port))
2649e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
2659e886b39SLeon Romanovsky 				     "usnic");
2669e886b39SLeon Romanovsky 	return ret;
267b4c598a6SLeon Romanovsky }
268b4c598a6SLeon Romanovsky 
2697d02f605SLeon Romanovsky static int fill_port_info(struct sk_buff *msg,
2705b2cc79dSLeon Romanovsky 			  struct ib_device *device, u32 port,
2715b2cc79dSLeon Romanovsky 			  const struct net *net)
2727d02f605SLeon Romanovsky {
2735b2cc79dSLeon Romanovsky 	struct net_device *netdev = NULL;
274ac505253SLeon Romanovsky 	struct ib_port_attr attr;
275ac505253SLeon Romanovsky 	int ret;
2764fa2813dSMichael Guralnik 	u64 cap_flags = 0;
277ac505253SLeon Romanovsky 
278c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
2797d02f605SLeon Romanovsky 		return -EMSGSIZE;
280c2409810SLeon Romanovsky 
2817d02f605SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
2827d02f605SLeon Romanovsky 		return -EMSGSIZE;
283ac505253SLeon Romanovsky 
284ac505253SLeon Romanovsky 	ret = ib_query_port(device, port, &attr);
285ac505253SLeon Romanovsky 	if (ret)
286ac505253SLeon Romanovsky 		return ret;
287ac505253SLeon Romanovsky 
288dd8028f1SLeon Romanovsky 	if (rdma_protocol_ib(device, port)) {
2894fa2813dSMichael Guralnik 		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
2904fa2813dSMichael Guralnik 				sizeof(attr.port_cap_flags2)) > sizeof(u64));
2914fa2813dSMichael Guralnik 		cap_flags = attr.port_cap_flags |
2924fa2813dSMichael Guralnik 			((u64)attr.port_cap_flags2 << 32);
293ac505253SLeon Romanovsky 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
2944fa2813dSMichael Guralnik 				      cap_flags, RDMA_NLDEV_ATTR_PAD))
295ac505253SLeon Romanovsky 			return -EMSGSIZE;
296dd8028f1SLeon Romanovsky 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
29725a0ad85SSteve Wise 				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
29812026fbbSLeon Romanovsky 			return -EMSGSIZE;
29980a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
30080a06dd3SLeon Romanovsky 			return -EMSGSIZE;
30180a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
30280a06dd3SLeon Romanovsky 			return -EMSGSIZE;
30334840feaSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
30434840feaSLeon Romanovsky 			return -EMSGSIZE;
30580a06dd3SLeon Romanovsky 	}
3065654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
3075654e49dSLeon Romanovsky 		return -EMSGSIZE;
3085654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
3095654e49dSLeon Romanovsky 		return -EMSGSIZE;
3105b2cc79dSLeon Romanovsky 
311c2261dd7SJason Gunthorpe 	netdev = ib_device_get_netdev(device, port);
3125b2cc79dSLeon Romanovsky 	if (netdev && net_eq(dev_net(netdev), net)) {
3135b2cc79dSLeon Romanovsky 		ret = nla_put_u32(msg,
3145b2cc79dSLeon Romanovsky 				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
3155b2cc79dSLeon Romanovsky 		if (ret)
3165b2cc79dSLeon Romanovsky 			goto out;
3175b2cc79dSLeon Romanovsky 		ret = nla_put_string(msg,
3185b2cc79dSLeon Romanovsky 				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
3195b2cc79dSLeon Romanovsky 	}
3205b2cc79dSLeon Romanovsky 
3215b2cc79dSLeon Romanovsky out:
3225b2cc79dSLeon Romanovsky 	if (netdev)
3235b2cc79dSLeon Romanovsky 		dev_put(netdev);
3245b2cc79dSLeon Romanovsky 	return ret;
3257d02f605SLeon Romanovsky }
3267d02f605SLeon Romanovsky 
327bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
328bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
329bf3c5a93SLeon Romanovsky {
330bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
331bf3c5a93SLeon Romanovsky 
332ae0be8deSMichal Kubecek 	entry_attr = nla_nest_start_noflag(msg,
333ae0be8deSMichal Kubecek 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
334bf3c5a93SLeon Romanovsky 	if (!entry_attr)
335bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
336bf3c5a93SLeon Romanovsky 
337bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
338bf3c5a93SLeon Romanovsky 		goto err;
33925a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
34025a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
341bf3c5a93SLeon Romanovsky 		goto err;
342bf3c5a93SLeon Romanovsky 
343bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
344bf3c5a93SLeon Romanovsky 	return 0;
345bf3c5a93SLeon Romanovsky 
346bf3c5a93SLeon Romanovsky err:
347bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
348bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
349bf3c5a93SLeon Romanovsky }
350bf3c5a93SLeon Romanovsky 
351bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
352bf3c5a93SLeon Romanovsky {
353bf3c5a93SLeon Romanovsky 	static const char * const names[RDMA_RESTRACK_MAX] = {
354bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_PD] = "pd",
355bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_CQ] = "cq",
356bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_QP] = "qp",
35700313983SSteve Wise 		[RDMA_RESTRACK_CM_ID] = "cm_id",
358fccec5b8SSteve Wise 		[RDMA_RESTRACK_MR] = "mr",
359ffd321e4SLeon Romanovsky 		[RDMA_RESTRACK_CTX] = "ctx",
360bf3c5a93SLeon Romanovsky 	};
361bf3c5a93SLeon Romanovsky 
362bf3c5a93SLeon Romanovsky 	struct nlattr *table_attr;
363bf3c5a93SLeon Romanovsky 	int ret, i, curr;
364bf3c5a93SLeon Romanovsky 
365bf3c5a93SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
366bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
367bf3c5a93SLeon Romanovsky 
368ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
369bf3c5a93SLeon Romanovsky 	if (!table_attr)
370bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
371bf3c5a93SLeon Romanovsky 
372bf3c5a93SLeon Romanovsky 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
373bf3c5a93SLeon Romanovsky 		if (!names[i])
374bf3c5a93SLeon Romanovsky 			continue;
3750ad699c0SLeon Romanovsky 		curr = rdma_restrack_count(device, i,
3760ad699c0SLeon Romanovsky 					   task_active_pid_ns(current));
377bf3c5a93SLeon Romanovsky 		ret = fill_res_info_entry(msg, names[i], curr);
378bf3c5a93SLeon Romanovsky 		if (ret)
379bf3c5a93SLeon Romanovsky 			goto err;
380bf3c5a93SLeon Romanovsky 	}
381bf3c5a93SLeon Romanovsky 
382bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, table_attr);
383bf3c5a93SLeon Romanovsky 	return 0;
384bf3c5a93SLeon Romanovsky 
385bf3c5a93SLeon Romanovsky err:
386bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, table_attr);
387bf3c5a93SLeon Romanovsky 	return ret;
388bf3c5a93SLeon Romanovsky }
389bf3c5a93SLeon Romanovsky 
39000313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
39100313983SSteve Wise 			     struct rdma_restrack_entry *res)
39200313983SSteve Wise {
39300313983SSteve Wise 	/*
39400313983SSteve Wise 	 * For user resources, user is should read /proc/PID/comm to get the
39500313983SSteve Wise 	 * name of the task file.
39600313983SSteve Wise 	 */
39700313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
39800313983SSteve Wise 		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
39900313983SSteve Wise 		    res->kern_name))
40000313983SSteve Wise 			return -EMSGSIZE;
40100313983SSteve Wise 	} else {
40200313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
40300313983SSteve Wise 		    task_pid_vnr(res->task)))
40400313983SSteve Wise 			return -EMSGSIZE;
40500313983SSteve Wise 	}
40600313983SSteve Wise 	return 0;
40700313983SSteve Wise }
40800313983SSteve Wise 
40902da3750SLeon Romanovsky static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
41002da3750SLeon Romanovsky 			   struct rdma_restrack_entry *res)
41102da3750SLeon Romanovsky {
41202da3750SLeon Romanovsky 	if (!dev->ops.fill_res_entry)
41302da3750SLeon Romanovsky 		return false;
41402da3750SLeon Romanovsky 	return dev->ops.fill_res_entry(msg, res);
41502da3750SLeon Romanovsky }
41602da3750SLeon Romanovsky 
417659067b0SLeon Romanovsky static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
418d12ff624SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
419b5fa635aSLeon Romanovsky {
420d12ff624SSteve Wise 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
42102da3750SLeon Romanovsky 	struct ib_device *dev = qp->device;
422b5fa635aSLeon Romanovsky 	struct ib_qp_init_attr qp_init_attr;
423b5fa635aSLeon Romanovsky 	struct ib_qp_attr qp_attr;
424b5fa635aSLeon Romanovsky 	int ret;
425b5fa635aSLeon Romanovsky 
426b5fa635aSLeon Romanovsky 	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
427b5fa635aSLeon Romanovsky 	if (ret)
428b5fa635aSLeon Romanovsky 		return ret;
429b5fa635aSLeon Romanovsky 
430b5fa635aSLeon Romanovsky 	if (port && port != qp_attr.port_num)
431c5dfe0eaSLeon Romanovsky 		return -EAGAIN;
432b5fa635aSLeon Romanovsky 
433b5fa635aSLeon Romanovsky 	/* In create_qp() port is not set yet */
434b5fa635aSLeon Romanovsky 	if (qp_attr.port_num &&
435b5fa635aSLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
436b5fa635aSLeon Romanovsky 		goto err;
437b5fa635aSLeon Romanovsky 
438b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
439b5fa635aSLeon Romanovsky 		goto err;
440b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
441b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
442b5fa635aSLeon Romanovsky 				qp_attr.dest_qp_num))
443b5fa635aSLeon Romanovsky 			goto err;
444b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
445b5fa635aSLeon Romanovsky 				qp_attr.rq_psn))
446b5fa635aSLeon Romanovsky 			goto err;
447b5fa635aSLeon Romanovsky 	}
448b5fa635aSLeon Romanovsky 
449b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
450b5fa635aSLeon Romanovsky 		goto err;
451b5fa635aSLeon Romanovsky 
452b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
453b5fa635aSLeon Romanovsky 	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
454b5fa635aSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
455b5fa635aSLeon Romanovsky 			       qp_attr.path_mig_state))
456b5fa635aSLeon Romanovsky 			goto err;
457b5fa635aSLeon Romanovsky 	}
458b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
459b5fa635aSLeon Romanovsky 		goto err;
460b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
461b5fa635aSLeon Romanovsky 		goto err;
462b5fa635aSLeon Romanovsky 
463c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
464c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
465c3d02788SLeon Romanovsky 		goto err;
466c3d02788SLeon Romanovsky 
46700313983SSteve Wise 	if (fill_res_name_pid(msg, res))
468b5fa635aSLeon Romanovsky 		goto err;
46900313983SSteve Wise 
47002da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
471da5c8507SSteve Wise 		goto err;
472da5c8507SSteve Wise 
47300313983SSteve Wise 	return 0;
47400313983SSteve Wise 
475c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
47600313983SSteve Wise }
47700313983SSteve Wise 
478659067b0SLeon Romanovsky static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
47900313983SSteve Wise 				struct rdma_restrack_entry *res, uint32_t port)
48000313983SSteve Wise {
48100313983SSteve Wise 	struct rdma_id_private *id_priv =
48200313983SSteve Wise 				container_of(res, struct rdma_id_private, res);
48302da3750SLeon Romanovsky 	struct ib_device *dev = id_priv->id.device;
48400313983SSteve Wise 	struct rdma_cm_id *cm_id = &id_priv->id;
48500313983SSteve Wise 
48600313983SSteve Wise 	if (port && port != cm_id->port_num)
48700313983SSteve Wise 		return 0;
48800313983SSteve Wise 
48900313983SSteve Wise 	if (cm_id->port_num &&
49000313983SSteve Wise 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
49100313983SSteve Wise 		goto err;
49200313983SSteve Wise 
49300313983SSteve Wise 	if (id_priv->qp_num) {
49400313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
49500313983SSteve Wise 			goto err;
49600313983SSteve Wise 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
497b5fa635aSLeon Romanovsky 			goto err;
498b5fa635aSLeon Romanovsky 	}
499b5fa635aSLeon Romanovsky 
50000313983SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
50100313983SSteve Wise 		goto err;
50200313983SSteve Wise 
50300313983SSteve Wise 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
50400313983SSteve Wise 		goto err;
50500313983SSteve Wise 
50600313983SSteve Wise 	if (cm_id->route.addr.src_addr.ss_family &&
50700313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
50800313983SSteve Wise 		    sizeof(cm_id->route.addr.src_addr),
50900313983SSteve Wise 		    &cm_id->route.addr.src_addr))
51000313983SSteve Wise 		goto err;
51100313983SSteve Wise 	if (cm_id->route.addr.dst_addr.ss_family &&
51200313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
51300313983SSteve Wise 		    sizeof(cm_id->route.addr.dst_addr),
51400313983SSteve Wise 		    &cm_id->route.addr.dst_addr))
51500313983SSteve Wise 		goto err;
51600313983SSteve Wise 
517517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
518517b773eSLeon Romanovsky 		goto err;
519517b773eSLeon Romanovsky 
52000313983SSteve Wise 	if (fill_res_name_pid(msg, res))
52100313983SSteve Wise 		goto err;
52200313983SSteve Wise 
52302da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
524da5c8507SSteve Wise 		goto err;
525da5c8507SSteve Wise 
526b5fa635aSLeon Romanovsky 	return 0;
527b5fa635aSLeon Romanovsky 
528c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE;
529b5fa635aSLeon Romanovsky }
530b5fa635aSLeon Romanovsky 
531659067b0SLeon Romanovsky static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
532a34fc089SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
533a34fc089SSteve Wise {
534a34fc089SSteve Wise 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
53502da3750SLeon Romanovsky 	struct ib_device *dev = cq->device;
536a34fc089SSteve Wise 
537a34fc089SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
538a34fc089SSteve Wise 		goto err;
539a34fc089SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
54025a0ad85SSteve Wise 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
541a34fc089SSteve Wise 		goto err;
542a34fc089SSteve Wise 
543a34fc089SSteve Wise 	/* Poll context is only valid for kernel CQs */
544a34fc089SSteve Wise 	if (rdma_is_kernel_res(res) &&
545a34fc089SSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
546a34fc089SSteve Wise 		goto err;
547a34fc089SSteve Wise 
548517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
549517b773eSLeon Romanovsky 		goto err;
550c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
551c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
552c3d02788SLeon Romanovsky 			cq->uobject->context->res.id))
553c3d02788SLeon Romanovsky 		goto err;
554517b773eSLeon Romanovsky 
555a34fc089SSteve Wise 	if (fill_res_name_pid(msg, res))
556a34fc089SSteve Wise 		goto err;
557a34fc089SSteve Wise 
55802da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
559da5c8507SSteve Wise 		goto err;
560da5c8507SSteve Wise 
561a34fc089SSteve Wise 	return 0;
562a34fc089SSteve Wise 
563c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
564a34fc089SSteve Wise }
565a34fc089SSteve Wise 
566659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
567fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
568fccec5b8SSteve Wise {
569fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
57002da3750SLeon Romanovsky 	struct ib_device *dev = mr->pd->device;
571fccec5b8SSteve Wise 
572659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
573fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
574fccec5b8SSteve Wise 			goto err;
575fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
576fccec5b8SSteve Wise 			goto err;
577fccec5b8SSteve Wise 	}
578fccec5b8SSteve Wise 
57925a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
58025a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
581fccec5b8SSteve Wise 		goto err;
582fccec5b8SSteve Wise 
583517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
584517b773eSLeon Romanovsky 		goto err;
585517b773eSLeon Romanovsky 
586c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
587c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
588c3d02788SLeon Romanovsky 		goto err;
589c3d02788SLeon Romanovsky 
590fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
591fccec5b8SSteve Wise 		goto err;
592fccec5b8SSteve Wise 
59302da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
594da5c8507SSteve Wise 		goto err;
595da5c8507SSteve Wise 
596fccec5b8SSteve Wise 	return 0;
597fccec5b8SSteve Wise 
598c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
599fccec5b8SSteve Wise }
600fccec5b8SSteve Wise 
601659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
60229cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
60329cf1351SSteve Wise {
60429cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
60502da3750SLeon Romanovsky 	struct ib_device *dev = pd->device;
60629cf1351SSteve Wise 
607659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
60829cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
60929cf1351SSteve Wise 				pd->local_dma_lkey))
61029cf1351SSteve Wise 			goto err;
61129cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
61229cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
61329cf1351SSteve Wise 				pd->unsafe_global_rkey))
61429cf1351SSteve Wise 			goto err;
61529cf1351SSteve Wise 	}
61629cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
61725a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
61829cf1351SSteve Wise 		goto err;
61929cf1351SSteve Wise 
620517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
621517b773eSLeon Romanovsky 		goto err;
622517b773eSLeon Romanovsky 
623c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
624c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
625c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
626c3d02788SLeon Romanovsky 		goto err;
627c3d02788SLeon Romanovsky 
62829cf1351SSteve Wise 	if (fill_res_name_pid(msg, res))
62929cf1351SSteve Wise 		goto err;
63029cf1351SSteve Wise 
63102da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
632da5c8507SSteve Wise 		goto err;
633da5c8507SSteve Wise 
63429cf1351SSteve Wise 	return 0;
63529cf1351SSteve Wise 
636c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
63729cf1351SSteve Wise }
63829cf1351SSteve Wise 
639e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
640e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
641e5c9469eSLeon Romanovsky {
642e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
643e5c9469eSLeon Romanovsky 	struct ib_device *device;
644e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
645e5c9469eSLeon Romanovsky 	u32 index;
646e5c9469eSLeon Romanovsky 	int err;
647e5c9469eSLeon Romanovsky 
6488cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
649e5c9469eSLeon Romanovsky 				     nldev_policy, extack);
650e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
651e5c9469eSLeon Romanovsky 		return -EINVAL;
652e5c9469eSLeon Romanovsky 
653e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
654e5c9469eSLeon Romanovsky 
65537eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
656e5c9469eSLeon Romanovsky 	if (!device)
657e5c9469eSLeon Romanovsky 		return -EINVAL;
658e5c9469eSLeon Romanovsky 
659e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
660f8978bd9SLeon Romanovsky 	if (!msg) {
661f8978bd9SLeon Romanovsky 		err = -ENOMEM;
662f8978bd9SLeon Romanovsky 		goto err;
663f8978bd9SLeon Romanovsky 	}
664e5c9469eSLeon Romanovsky 
665e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
666e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
667e5c9469eSLeon Romanovsky 			0, 0);
668e5c9469eSLeon Romanovsky 
669e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
670f8978bd9SLeon Romanovsky 	if (err)
671f8978bd9SLeon Romanovsky 		goto err_free;
672e5c9469eSLeon Romanovsky 
673e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
674e5c9469eSLeon Romanovsky 
67501b67117SParav Pandit 	ib_device_put(device);
676e5c9469eSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
677f8978bd9SLeon Romanovsky 
678f8978bd9SLeon Romanovsky err_free:
679f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
680f8978bd9SLeon Romanovsky err:
68101b67117SParav Pandit 	ib_device_put(device);
682f8978bd9SLeon Romanovsky 	return err;
683e5c9469eSLeon Romanovsky }
684e5c9469eSLeon Romanovsky 
/*
 * RDMA_NLDEV_CMD_SET doit: rename a device or move it to another net
 * namespace.  Only one of the two attributes is acted on per request;
 * a request with neither attribute returns the (zero) parse result.
 */
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Holds a device reference; dropped at "done" (but see below). */
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		/*
		 * ib_device_set_netns_put() consumes the device reference
		 * itself (the "_put" suffix), so jump past our own
		 * ib_device_put() to avoid a double put.
		 */
		err = ib_device_set_netns_put(skb, device, ns_fd);
		goto put_done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
72505d940d3SLeon Romanovsky 
726b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
727b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
728b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
729b4c598a6SLeon Romanovsky 			     unsigned int idx)
730b4c598a6SLeon Romanovsky {
731b4c598a6SLeon Romanovsky 	int start = cb->args[0];
732b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
733b4c598a6SLeon Romanovsky 
734b4c598a6SLeon Romanovsky 	if (idx < start)
735b4c598a6SLeon Romanovsky 		return 0;
736b4c598a6SLeon Romanovsky 
737b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
738b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
739b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
740b4c598a6SLeon Romanovsky 
741b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
742b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
743b4c598a6SLeon Romanovsky 		goto out;
744b4c598a6SLeon Romanovsky 	}
745b4c598a6SLeon Romanovsky 
746b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
747b4c598a6SLeon Romanovsky 
748b4c598a6SLeon Romanovsky 	idx++;
749b4c598a6SLeon Romanovsky 
750b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
751b4c598a6SLeon Romanovsky 	return skb->len;
752b4c598a6SLeon Romanovsky }
753b4c598a6SLeon Romanovsky 
/* RDMA_NLDEV_CMD_GET dumpit: enumerate every registered device. */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
762b4c598a6SLeon Romanovsky 
763c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
764c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
765c3f66f7bSLeon Romanovsky {
766c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
767c3f66f7bSLeon Romanovsky 	struct ib_device *device;
768c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
769c3f66f7bSLeon Romanovsky 	u32 index;
770c3f66f7bSLeon Romanovsky 	u32 port;
771c3f66f7bSLeon Romanovsky 	int err;
772c3f66f7bSLeon Romanovsky 
7738cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
774c3f66f7bSLeon Romanovsky 				     nldev_policy, extack);
775287683d0SLeon Romanovsky 	if (err ||
776287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
777287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
778c3f66f7bSLeon Romanovsky 		return -EINVAL;
779c3f66f7bSLeon Romanovsky 
780c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
78137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
782c3f66f7bSLeon Romanovsky 	if (!device)
783c3f66f7bSLeon Romanovsky 		return -EINVAL;
784c3f66f7bSLeon Romanovsky 
785c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
786f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
787f8978bd9SLeon Romanovsky 		err = -EINVAL;
788f8978bd9SLeon Romanovsky 		goto err;
789f8978bd9SLeon Romanovsky 	}
790c3f66f7bSLeon Romanovsky 
791c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
792f8978bd9SLeon Romanovsky 	if (!msg) {
793f8978bd9SLeon Romanovsky 		err = -ENOMEM;
794f8978bd9SLeon Romanovsky 		goto err;
795f8978bd9SLeon Romanovsky 	}
796c3f66f7bSLeon Romanovsky 
797c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
798c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
799c3f66f7bSLeon Romanovsky 			0, 0);
800c3f66f7bSLeon Romanovsky 
8015b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
802f8978bd9SLeon Romanovsky 	if (err)
803f8978bd9SLeon Romanovsky 		goto err_free;
804c3f66f7bSLeon Romanovsky 
805c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
80601b67117SParav Pandit 	ib_device_put(device);
807c3f66f7bSLeon Romanovsky 
808c3f66f7bSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
809f8978bd9SLeon Romanovsky 
810f8978bd9SLeon Romanovsky err_free:
811f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
812f8978bd9SLeon Romanovsky err:
81301b67117SParav Pandit 	ib_device_put(device);
814f8978bd9SLeon Romanovsky 	return err;
815c3f66f7bSLeon Romanovsky }
816c3f66f7bSLeon Romanovsky 
/*
 * RDMA_NLDEV_CMD_PORT_GET dumpit: enumerate all ports of one device.
 * The port cursor survives across dump calls in cb->args[0].
 */
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];	/* first port not yet delivered */
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	/* Port dumps are always scoped to exactly one device. */
	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function returns all information from specific
		 * index. This specific index is taken from the netlink
		 * messages request sent by user and it is available
		 * in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field and it causes
		 * to return everything.
		 *
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			/* skb full: cancel this entry, resume here next call */
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;	/* resume point for the next dump call */
	return skb->len;
}
8747d02f605SLeon Romanovsky 
875bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
876bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
877bf3c5a93SLeon Romanovsky {
878bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
879bf3c5a93SLeon Romanovsky 	struct ib_device *device;
880bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
881bf3c5a93SLeon Romanovsky 	u32 index;
882bf3c5a93SLeon Romanovsky 	int ret;
883bf3c5a93SLeon Romanovsky 
8848cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
885bf3c5a93SLeon Romanovsky 				     nldev_policy, extack);
886bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
887bf3c5a93SLeon Romanovsky 		return -EINVAL;
888bf3c5a93SLeon Romanovsky 
889bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
89037eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
891bf3c5a93SLeon Romanovsky 	if (!device)
892bf3c5a93SLeon Romanovsky 		return -EINVAL;
893bf3c5a93SLeon Romanovsky 
894bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
895f34727a1SDan Carpenter 	if (!msg) {
896f34727a1SDan Carpenter 		ret = -ENOMEM;
897bf3c5a93SLeon Romanovsky 		goto err;
898f34727a1SDan Carpenter 	}
899bf3c5a93SLeon Romanovsky 
900bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
901bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
902bf3c5a93SLeon Romanovsky 			0, 0);
903bf3c5a93SLeon Romanovsky 
904bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
905bf3c5a93SLeon Romanovsky 	if (ret)
906bf3c5a93SLeon Romanovsky 		goto err_free;
907bf3c5a93SLeon Romanovsky 
908bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
90901b67117SParav Pandit 	ib_device_put(device);
910bf3c5a93SLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
911bf3c5a93SLeon Romanovsky 
912bf3c5a93SLeon Romanovsky err_free:
913bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
914bf3c5a93SLeon Romanovsky err:
91501b67117SParav Pandit 	ib_device_put(device);
916bf3c5a93SLeon Romanovsky 	return ret;
917bf3c5a93SLeon Romanovsky }
918bf3c5a93SLeon Romanovsky 
919bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
920bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
921bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
922bf3c5a93SLeon Romanovsky 				 unsigned int idx)
923bf3c5a93SLeon Romanovsky {
924bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
925bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
926bf3c5a93SLeon Romanovsky 
927bf3c5a93SLeon Romanovsky 	if (idx < start)
928bf3c5a93SLeon Romanovsky 		return 0;
929bf3c5a93SLeon Romanovsky 
930bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
931bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
932bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
933bf3c5a93SLeon Romanovsky 
934bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
935bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
936bf3c5a93SLeon Romanovsky 		goto out;
937bf3c5a93SLeon Romanovsky 	}
938bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
939bf3c5a93SLeon Romanovsky 
940bf3c5a93SLeon Romanovsky 	idx++;
941bf3c5a93SLeon Romanovsky 
942bf3c5a93SLeon Romanovsky out:
943bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
944bf3c5a93SLeon Romanovsky 	return skb->len;
945bf3c5a93SLeon Romanovsky }
946bf3c5a93SLeon Romanovsky 
/* RDMA_NLDEV_CMD_RES_GET dumpit: resource summary for every device. */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
952bf3c5a93SLeon Romanovsky 
/**
 * struct nldev_fill_res_entry - per-restrack-type description of how to
 * dump/fill that resource kind over nldev netlink.
 * @fill_res_func: emits one resource's attributes into the message
 * @nldev_attr: nested attribute that wraps the whole table in a dump
 * @nldev_cmd: netlink command placed in the reply header
 * @flags: combination of enum nldev_res_flags bits
 * @entry: attribute that wraps a single entry inside @nldev_attr
 * @id: attribute carrying the resource's unique id (used by doit lookup)
 */
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
	u8 flags;
	u32 entry;
	u32 id;
};

enum nldev_res_flags {
	/* Resource is queried per device: a port index must NOT be given. */
	NLDEV_PER_DEV = 1 << 0,
};
966d12ff624SSteve Wise 
/*
 * Dispatch table, indexed by restrack type, used by the common
 * res_get doit/dumpit helpers to pick commands, attributes and the
 * fill callback for each resource kind.
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
};
1007d12ff624SSteve Wise 
/*
 * Common doit handler for all RDMA_NLDEV_CMD_RES_*_GET commands:
 * look up a single tracked resource of @res_type by its id attribute
 * and reply with its attributes.  Returns negative errno on failure.
 */
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	/* The type-specific id attribute (fe->id) is mandatory for doit. */
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Takes a device reference; released on all exit paths below. */
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Per-device resources (NLDEV_PER_DEV set) must not carry a port
	 * index; per-port resources must.  Note `~fe->flags & NLDEV_PER_DEV`
	 * is non-zero exactly when the PER_DEV bit is clear.
	 */
	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	/* On success the entry is returned with a reference held. */
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	/* Hide resources that belong to other PID namespaces. */
	if (!rdma_is_visible_in_pid_ns(res)) {
		ret = -ENOENT;
		goto err_get;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, 0);

	if (fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	/* Privileged callers get extra (security sensitive) attributes. */
	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
	rdma_restrack_put(res);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}
1090c5dfe0eaSLeon Romanovsky 
1091d12ff624SSteve Wise static int res_get_common_dumpit(struct sk_buff *skb,
1092d12ff624SSteve Wise 				 struct netlink_callback *cb,
1093d12ff624SSteve Wise 				 enum rdma_restrack_type res_type)
1094b5fa635aSLeon Romanovsky {
1095d12ff624SSteve Wise 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1096b5fa635aSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1097b5fa635aSLeon Romanovsky 	struct rdma_restrack_entry *res;
10987c77c6a9SLeon Romanovsky 	struct rdma_restrack_root *rt;
1099b5fa635aSLeon Romanovsky 	int err, ret = 0, idx = 0;
1100b5fa635aSLeon Romanovsky 	struct nlattr *table_attr;
1101c5dfe0eaSLeon Romanovsky 	struct nlattr *entry_attr;
1102b5fa635aSLeon Romanovsky 	struct ib_device *device;
1103b5fa635aSLeon Romanovsky 	int start = cb->args[0];
1104659067b0SLeon Romanovsky 	bool has_cap_net_admin;
1105b5fa635aSLeon Romanovsky 	struct nlmsghdr *nlh;
1106fd47c2f9SLeon Romanovsky 	unsigned long id;
1107b5fa635aSLeon Romanovsky 	u32 index, port = 0;
1108d12ff624SSteve Wise 	bool filled = false;
1109b5fa635aSLeon Romanovsky 
11108cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1111b5fa635aSLeon Romanovsky 				     nldev_policy, NULL);
1112b5fa635aSLeon Romanovsky 	/*
1113d12ff624SSteve Wise 	 * Right now, we are expecting the device index to get res information,
1114b5fa635aSLeon Romanovsky 	 * but it is possible to extend this code to return all devices in
1115b5fa635aSLeon Romanovsky 	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
1116b5fa635aSLeon Romanovsky 	 * if it doesn't exist, we will iterate over all devices.
1117b5fa635aSLeon Romanovsky 	 *
1118b5fa635aSLeon Romanovsky 	 * But it is not needed for now.
1119b5fa635aSLeon Romanovsky 	 */
1120b5fa635aSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1121b5fa635aSLeon Romanovsky 		return -EINVAL;
1122b5fa635aSLeon Romanovsky 
1123b5fa635aSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
112437eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1125b5fa635aSLeon Romanovsky 	if (!device)
1126b5fa635aSLeon Romanovsky 		return -EINVAL;
1127b5fa635aSLeon Romanovsky 
1128b5fa635aSLeon Romanovsky 	/*
1129b5fa635aSLeon Romanovsky 	 * If no PORT_INDEX is supplied, we will return all QPs from that device
1130b5fa635aSLeon Romanovsky 	 */
1131b5fa635aSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1132b5fa635aSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1133b5fa635aSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1134b5fa635aSLeon Romanovsky 			ret = -EINVAL;
1135b5fa635aSLeon Romanovsky 			goto err_index;
1136b5fa635aSLeon Romanovsky 		}
1137b5fa635aSLeon Romanovsky 	}
1138b5fa635aSLeon Romanovsky 
1139b5fa635aSLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1140d12ff624SSteve Wise 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
1141b5fa635aSLeon Romanovsky 			0, NLM_F_MULTI);
1142b5fa635aSLeon Romanovsky 
1143b5fa635aSLeon Romanovsky 	if (fill_nldev_handle(skb, device)) {
1144b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1145b5fa635aSLeon Romanovsky 		goto err;
1146b5fa635aSLeon Romanovsky 	}
1147b5fa635aSLeon Romanovsky 
1148ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
1149b5fa635aSLeon Romanovsky 	if (!table_attr) {
1150b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1151b5fa635aSLeon Romanovsky 		goto err;
1152b5fa635aSLeon Romanovsky 	}
1153b5fa635aSLeon Romanovsky 
1154659067b0SLeon Romanovsky 	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
1155659067b0SLeon Romanovsky 
11567c77c6a9SLeon Romanovsky 	rt = &device->res[res_type];
11577c77c6a9SLeon Romanovsky 	xa_lock(&rt->xa);
1158fd47c2f9SLeon Romanovsky 	/*
1159fd47c2f9SLeon Romanovsky 	 * FIXME: if the skip ahead is something common this loop should
1160fd47c2f9SLeon Romanovsky 	 * use xas_for_each & xas_pause to optimize, we can have a lot of
1161fd47c2f9SLeon Romanovsky 	 * objects.
1162fd47c2f9SLeon Romanovsky 	 */
11637c77c6a9SLeon Romanovsky 	xa_for_each(&rt->xa, id, res) {
11646a6c306aSMark Zhang 		if (!rdma_is_visible_in_pid_ns(res))
1165f2a0e45fSLeon Romanovsky 			continue;
1166b5fa635aSLeon Romanovsky 
1167f2a0e45fSLeon Romanovsky 		if (idx < start || !rdma_restrack_get(res))
1168b5fa635aSLeon Romanovsky 			goto next;
1169b5fa635aSLeon Romanovsky 
11707c77c6a9SLeon Romanovsky 		xa_unlock(&rt->xa);
11717c77c6a9SLeon Romanovsky 
1172d12ff624SSteve Wise 		filled = true;
1173b5fa635aSLeon Romanovsky 
1174ae0be8deSMichal Kubecek 		entry_attr = nla_nest_start_noflag(skb, fe->entry);
1175c5dfe0eaSLeon Romanovsky 		if (!entry_attr) {
1176c5dfe0eaSLeon Romanovsky 			ret = -EMSGSIZE;
1177c5dfe0eaSLeon Romanovsky 			rdma_restrack_put(res);
11787c77c6a9SLeon Romanovsky 			goto msg_full;
1179c5dfe0eaSLeon Romanovsky 		}
1180c5dfe0eaSLeon Romanovsky 
1181659067b0SLeon Romanovsky 		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
1182b5fa635aSLeon Romanovsky 		rdma_restrack_put(res);
1183b5fa635aSLeon Romanovsky 
11847c77c6a9SLeon Romanovsky 		if (ret) {
1185c5dfe0eaSLeon Romanovsky 			nla_nest_cancel(skb, entry_attr);
1186b5fa635aSLeon Romanovsky 			if (ret == -EMSGSIZE)
11877c77c6a9SLeon Romanovsky 				goto msg_full;
1188c5dfe0eaSLeon Romanovsky 			if (ret == -EAGAIN)
11897c77c6a9SLeon Romanovsky 				goto again;
1190b5fa635aSLeon Romanovsky 			goto res_err;
11917c77c6a9SLeon Romanovsky 		}
1192c5dfe0eaSLeon Romanovsky 		nla_nest_end(skb, entry_attr);
11937c77c6a9SLeon Romanovsky again:		xa_lock(&rt->xa);
1194b5fa635aSLeon Romanovsky next:		idx++;
1195b5fa635aSLeon Romanovsky 	}
11967c77c6a9SLeon Romanovsky 	xa_unlock(&rt->xa);
1197b5fa635aSLeon Romanovsky 
11987c77c6a9SLeon Romanovsky msg_full:
1199b5fa635aSLeon Romanovsky 	nla_nest_end(skb, table_attr);
1200b5fa635aSLeon Romanovsky 	nlmsg_end(skb, nlh);
1201b5fa635aSLeon Romanovsky 	cb->args[0] = idx;
1202b5fa635aSLeon Romanovsky 
1203b5fa635aSLeon Romanovsky 	/*
1204d12ff624SSteve Wise 	 * No more entries to fill, cancel the message and
1205b5fa635aSLeon Romanovsky 	 * return 0 to mark end of dumpit.
1206b5fa635aSLeon Romanovsky 	 */
1207d12ff624SSteve Wise 	if (!filled)
1208b5fa635aSLeon Romanovsky 		goto err;
1209b5fa635aSLeon Romanovsky 
121001b67117SParav Pandit 	ib_device_put(device);
1211b5fa635aSLeon Romanovsky 	return skb->len;
1212b5fa635aSLeon Romanovsky 
1213b5fa635aSLeon Romanovsky res_err:
1214b5fa635aSLeon Romanovsky 	nla_nest_cancel(skb, table_attr);
1215b5fa635aSLeon Romanovsky 
1216b5fa635aSLeon Romanovsky err:
1217b5fa635aSLeon Romanovsky 	nlmsg_cancel(skb, nlh);
1218b5fa635aSLeon Romanovsky 
1219b5fa635aSLeon Romanovsky err_index:
122001b67117SParav Pandit 	ib_device_put(device);
1221b5fa635aSLeon Romanovsky 	return ret;
1222b5fa635aSLeon Romanovsky }
1223b5fa635aSLeon Romanovsky 
/*
 * RES_GET_FUNCS() generates the two netlink handlers for one restrack
 * resource type: a "dumpit" (iterate all objects of that type) and a
 * "doit" (single-object query), both thin wrappers delegating to the
 * shared res_get_common_dumpit()/res_get_common_doit() helpers with the
 * matching RDMA_RESTRACK_* constant.
 */
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type);                   \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type);            \
	}
1236d12ff624SSteve Wise 
/* Instantiate the doit/dumpit handler pair for each tracked resource type. */
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);

/* Registry of link-type providers; all access is guarded by the rwsem. */
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
12453856ec4bSSteve Wise 
12463856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
12473856ec4bSSteve Wise {
12483856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12493856ec4bSSteve Wise 
12503856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
12513856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
12523856ec4bSSteve Wise 			goto out;
12533856ec4bSSteve Wise 	}
12543856ec4bSSteve Wise 	ops = NULL;
12553856ec4bSSteve Wise out:
12563856ec4bSSteve Wise 	return ops;
12573856ec4bSSteve Wise }
12583856ec4bSSteve Wise 
12593856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
12603856ec4bSSteve Wise {
12613856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1262afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
12633856ec4bSSteve Wise 		goto out;
12643856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
12653856ec4bSSteve Wise out:
12663856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
12673856ec4bSSteve Wise }
12683856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
12693856ec4bSSteve Wise 
/* Remove a previously registered link-type provider from the registry. */
void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
12773856ec4bSSteve Wise 
12783856ec4bSSteve Wise static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
12793856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
12803856ec4bSSteve Wise {
12813856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
12823856ec4bSSteve Wise 	char ibdev_name[IB_DEVICE_NAME_MAX];
12833856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12843856ec4bSSteve Wise 	char ndev_name[IFNAMSIZ];
12853856ec4bSSteve Wise 	struct net_device *ndev;
12863856ec4bSSteve Wise 	char type[IFNAMSIZ];
12873856ec4bSSteve Wise 	int err;
12883856ec4bSSteve Wise 
12898cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
12903856ec4bSSteve Wise 				     nldev_policy, extack);
12913856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
12923856ec4bSSteve Wise 	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
12933856ec4bSSteve Wise 		return -EINVAL;
12943856ec4bSSteve Wise 
12953856ec4bSSteve Wise 	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
12963856ec4bSSteve Wise 		    sizeof(ibdev_name));
12973856ec4bSSteve Wise 	if (strchr(ibdev_name, '%'))
12983856ec4bSSteve Wise 		return -EINVAL;
12993856ec4bSSteve Wise 
13003856ec4bSSteve Wise 	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
13013856ec4bSSteve Wise 	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
13023856ec4bSSteve Wise 		    sizeof(ndev_name));
13033856ec4bSSteve Wise 
13043856ec4bSSteve Wise 	ndev = dev_get_by_name(&init_net, ndev_name);
13053856ec4bSSteve Wise 	if (!ndev)
13063856ec4bSSteve Wise 		return -ENODEV;
13073856ec4bSSteve Wise 
13083856ec4bSSteve Wise 	down_read(&link_ops_rwsem);
13093856ec4bSSteve Wise 	ops = link_ops_get(type);
13103856ec4bSSteve Wise #ifdef CONFIG_MODULES
13113856ec4bSSteve Wise 	if (!ops) {
13123856ec4bSSteve Wise 		up_read(&link_ops_rwsem);
13133856ec4bSSteve Wise 		request_module("rdma-link-%s", type);
13143856ec4bSSteve Wise 		down_read(&link_ops_rwsem);
13153856ec4bSSteve Wise 		ops = link_ops_get(type);
13163856ec4bSSteve Wise 	}
13173856ec4bSSteve Wise #endif
13183856ec4bSSteve Wise 	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
13193856ec4bSSteve Wise 	up_read(&link_ops_rwsem);
13203856ec4bSSteve Wise 	dev_put(ndev);
13213856ec4bSSteve Wise 
13223856ec4bSSteve Wise 	return err;
13233856ec4bSSteve Wise }
13243856ec4bSSteve Wise 
13253856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
13263856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
13273856ec4bSSteve Wise {
13283856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
13293856ec4bSSteve Wise 	struct ib_device *device;
13303856ec4bSSteve Wise 	u32 index;
13313856ec4bSSteve Wise 	int err;
13323856ec4bSSteve Wise 
13338cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
13343856ec4bSSteve Wise 				     nldev_policy, extack);
13353856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
13363856ec4bSSteve Wise 		return -EINVAL;
13373856ec4bSSteve Wise 
13383856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
133937eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
13403856ec4bSSteve Wise 	if (!device)
13413856ec4bSSteve Wise 		return -EINVAL;
13423856ec4bSSteve Wise 
13433856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
13443856ec4bSSteve Wise 		ib_device_put(device);
13453856ec4bSSteve Wise 		return -EINVAL;
13463856ec4bSSteve Wise 	}
13473856ec4bSSteve Wise 
13483856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
13493856ec4bSSteve Wise 	return 0;
13503856ec4bSSteve Wise }
13513856ec4bSSteve Wise 
13520e2d00ebSJason Gunthorpe static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
13530e2d00ebSJason Gunthorpe 			     struct netlink_ext_ack *extack)
13540e2d00ebSJason Gunthorpe {
13550e2d00ebSJason Gunthorpe 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
135634d65cd8SDoug Ledford 	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
13570e2d00ebSJason Gunthorpe 	struct ib_client_nl_info data = {};
13580e2d00ebSJason Gunthorpe 	struct ib_device *ibdev = NULL;
13590e2d00ebSJason Gunthorpe 	struct sk_buff *msg;
13600e2d00ebSJason Gunthorpe 	u32 index;
13610e2d00ebSJason Gunthorpe 	int err;
13620e2d00ebSJason Gunthorpe 
13630e2d00ebSJason Gunthorpe 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
13640e2d00ebSJason Gunthorpe 			  extack);
13650e2d00ebSJason Gunthorpe 	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
13660e2d00ebSJason Gunthorpe 		return -EINVAL;
13670e2d00ebSJason Gunthorpe 
136834d65cd8SDoug Ledford 	nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
136934d65cd8SDoug Ledford 		    sizeof(client_name));
13700e2d00ebSJason Gunthorpe 
13710e2d00ebSJason Gunthorpe 	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
13720e2d00ebSJason Gunthorpe 		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
13730e2d00ebSJason Gunthorpe 		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
13740e2d00ebSJason Gunthorpe 		if (!ibdev)
13750e2d00ebSJason Gunthorpe 			return -EINVAL;
13760e2d00ebSJason Gunthorpe 
13770e2d00ebSJason Gunthorpe 		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
13780e2d00ebSJason Gunthorpe 			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
13790e2d00ebSJason Gunthorpe 			if (!rdma_is_port_valid(ibdev, data.port)) {
13800e2d00ebSJason Gunthorpe 				err = -EINVAL;
13810e2d00ebSJason Gunthorpe 				goto out_put;
13820e2d00ebSJason Gunthorpe 			}
13830e2d00ebSJason Gunthorpe 		} else {
13840e2d00ebSJason Gunthorpe 			data.port = -1;
13850e2d00ebSJason Gunthorpe 		}
13860e2d00ebSJason Gunthorpe 	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
13870e2d00ebSJason Gunthorpe 		return -EINVAL;
13880e2d00ebSJason Gunthorpe 	}
13890e2d00ebSJason Gunthorpe 
13900e2d00ebSJason Gunthorpe 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
13910e2d00ebSJason Gunthorpe 	if (!msg) {
13920e2d00ebSJason Gunthorpe 		err = -ENOMEM;
13930e2d00ebSJason Gunthorpe 		goto out_put;
13940e2d00ebSJason Gunthorpe 	}
13950e2d00ebSJason Gunthorpe 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
13960e2d00ebSJason Gunthorpe 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
13970e2d00ebSJason Gunthorpe 					 RDMA_NLDEV_CMD_GET_CHARDEV),
13980e2d00ebSJason Gunthorpe 			0, 0);
13990e2d00ebSJason Gunthorpe 
14000e2d00ebSJason Gunthorpe 	data.nl_msg = msg;
14010e2d00ebSJason Gunthorpe 	err = ib_get_client_nl_info(ibdev, client_name, &data);
14020e2d00ebSJason Gunthorpe 	if (err)
14030e2d00ebSJason Gunthorpe 		goto out_nlmsg;
14040e2d00ebSJason Gunthorpe 
14050e2d00ebSJason Gunthorpe 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
14060e2d00ebSJason Gunthorpe 				huge_encode_dev(data.cdev->devt),
14070e2d00ebSJason Gunthorpe 				RDMA_NLDEV_ATTR_PAD);
14080e2d00ebSJason Gunthorpe 	if (err)
14090e2d00ebSJason Gunthorpe 		goto out_data;
14100e2d00ebSJason Gunthorpe 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
14110e2d00ebSJason Gunthorpe 				RDMA_NLDEV_ATTR_PAD);
14120e2d00ebSJason Gunthorpe 	if (err)
14130e2d00ebSJason Gunthorpe 		goto out_data;
14140e2d00ebSJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
14150e2d00ebSJason Gunthorpe 			   dev_name(data.cdev))) {
14160e2d00ebSJason Gunthorpe 		err = -EMSGSIZE;
14170e2d00ebSJason Gunthorpe 		goto out_data;
14180e2d00ebSJason Gunthorpe 	}
14190e2d00ebSJason Gunthorpe 
14200e2d00ebSJason Gunthorpe 	nlmsg_end(msg, nlh);
14210e2d00ebSJason Gunthorpe 	put_device(data.cdev);
14220e2d00ebSJason Gunthorpe 	if (ibdev)
14230e2d00ebSJason Gunthorpe 		ib_device_put(ibdev);
14240e2d00ebSJason Gunthorpe 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
14250e2d00ebSJason Gunthorpe 
14260e2d00ebSJason Gunthorpe out_data:
14270e2d00ebSJason Gunthorpe 	put_device(data.cdev);
14280e2d00ebSJason Gunthorpe out_nlmsg:
14290e2d00ebSJason Gunthorpe 	nlmsg_free(msg);
14300e2d00ebSJason Gunthorpe out_put:
14310e2d00ebSJason Gunthorpe 	if (ibdev)
14320e2d00ebSJason Gunthorpe 		ib_device_put(ibdev);
14330e2d00ebSJason Gunthorpe 	return err;
14340e2d00ebSJason Gunthorpe }
14350e2d00ebSJason Gunthorpe 
14364d7ba8ceSParav Pandit static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
14374d7ba8ceSParav Pandit 			      struct netlink_ext_ack *extack)
1438cb7e0e13SParav Pandit {
1439cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
14404d7ba8ceSParav Pandit 	struct sk_buff *msg;
1441cb7e0e13SParav Pandit 	int err;
1442cb7e0e13SParav Pandit 
14434d7ba8ceSParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
14444d7ba8ceSParav Pandit 			  nldev_policy, extack);
1445cb7e0e13SParav Pandit 	if (err)
1446cb7e0e13SParav Pandit 		return err;
1447cb7e0e13SParav Pandit 
14484d7ba8ceSParav Pandit 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
14494d7ba8ceSParav Pandit 	if (!msg)
14504d7ba8ceSParav Pandit 		return -ENOMEM;
14514d7ba8ceSParav Pandit 
14524d7ba8ceSParav Pandit 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1453cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1454cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1455cb7e0e13SParav Pandit 			0, 0);
1456cb7e0e13SParav Pandit 
14574d7ba8ceSParav Pandit 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1458cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1459cb7e0e13SParav Pandit 	if (err) {
14604d7ba8ceSParav Pandit 		nlmsg_free(msg);
1461cb7e0e13SParav Pandit 		return err;
1462cb7e0e13SParav Pandit 	}
14634d7ba8ceSParav Pandit 	nlmsg_end(msg, nlh);
14644d7ba8ceSParav Pandit 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1465cb7e0e13SParav Pandit }
1466cb7e0e13SParav Pandit 
14672b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
14682b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
14692b34c558SParav Pandit {
14702b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
14712b34c558SParav Pandit 	u8 enable;
14722b34c558SParav Pandit 	int err;
14732b34c558SParav Pandit 
14742b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
14752b34c558SParav Pandit 			  nldev_policy, extack);
14762b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
14772b34c558SParav Pandit 		return -EINVAL;
14782b34c558SParav Pandit 
14792b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
14802b34c558SParav Pandit 	/* Only 0 and 1 are supported */
14812b34c558SParav Pandit 	if (enable > 1)
14822b34c558SParav Pandit 		return -EINVAL;
14832b34c558SParav Pandit 
14842b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
14852b34c558SParav Pandit 	return err;
14862b34c558SParav Pandit }
14872b34c558SParav Pandit 
1488b47ae6f8SMark Zhang static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1489b47ae6f8SMark Zhang 			       struct netlink_ext_ack *extack)
1490b47ae6f8SMark Zhang {
1491b47ae6f8SMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1492b47ae6f8SMark Zhang 	u32 index, port, mode, mask = 0;
1493b47ae6f8SMark Zhang 	struct ib_device *device;
1494b47ae6f8SMark Zhang 	struct sk_buff *msg;
1495b47ae6f8SMark Zhang 	int ret;
1496b47ae6f8SMark Zhang 
1497b47ae6f8SMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1498b47ae6f8SMark Zhang 			  nldev_policy, extack);
1499b47ae6f8SMark Zhang 	/* Currently only counter for QP is supported */
1500b47ae6f8SMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1501b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1502b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
1503b47ae6f8SMark Zhang 		return -EINVAL;
1504b47ae6f8SMark Zhang 
1505b47ae6f8SMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1506b47ae6f8SMark Zhang 		return -EINVAL;
1507b47ae6f8SMark Zhang 
1508b47ae6f8SMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1509b47ae6f8SMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1510b47ae6f8SMark Zhang 	if (!device)
1511b47ae6f8SMark Zhang 		return -EINVAL;
1512b47ae6f8SMark Zhang 
1513b47ae6f8SMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1514b47ae6f8SMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1515b47ae6f8SMark Zhang 		ret = -EINVAL;
1516b47ae6f8SMark Zhang 		goto err;
1517b47ae6f8SMark Zhang 	}
1518b47ae6f8SMark Zhang 
1519b47ae6f8SMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1520b47ae6f8SMark Zhang 	if (!msg) {
1521b47ae6f8SMark Zhang 		ret = -ENOMEM;
1522b47ae6f8SMark Zhang 		goto err;
1523b47ae6f8SMark Zhang 	}
1524b47ae6f8SMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1525b47ae6f8SMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1526b47ae6f8SMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
1527b47ae6f8SMark Zhang 			0, 0);
1528b47ae6f8SMark Zhang 
1529b47ae6f8SMark Zhang 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
1530b47ae6f8SMark Zhang 	if (mode != RDMA_COUNTER_MODE_AUTO) {
1531b47ae6f8SMark Zhang 		ret = -EMSGSIZE;
1532b47ae6f8SMark Zhang 		goto err_msg;
1533b47ae6f8SMark Zhang 	}
1534b47ae6f8SMark Zhang 
1535b47ae6f8SMark Zhang 	if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
1536b47ae6f8SMark Zhang 		mask = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
1537b47ae6f8SMark Zhang 
1538b47ae6f8SMark Zhang 	ret = rdma_counter_set_auto_mode(device, port,
1539b47ae6f8SMark Zhang 					 mask ? true : false, mask);
1540b47ae6f8SMark Zhang 	if (ret)
1541b47ae6f8SMark Zhang 		goto err_msg;
1542b47ae6f8SMark Zhang 
1543b47ae6f8SMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode) ||
1544b47ae6f8SMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
1545b47ae6f8SMark Zhang 		ret = -EMSGSIZE;
1546b47ae6f8SMark Zhang 		goto err_msg;
1547b47ae6f8SMark Zhang 	}
1548b47ae6f8SMark Zhang 
1549b47ae6f8SMark Zhang 	nlmsg_end(msg, nlh);
1550b47ae6f8SMark Zhang 	ib_device_put(device);
1551b47ae6f8SMark Zhang 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1552b47ae6f8SMark Zhang 
1553b47ae6f8SMark Zhang err_msg:
1554b47ae6f8SMark Zhang 	nlmsg_free(msg);
1555b47ae6f8SMark Zhang err:
1556b47ae6f8SMark Zhang 	ib_device_put(device);
1557b47ae6f8SMark Zhang 	return ret;
1558b47ae6f8SMark Zhang }
1559b47ae6f8SMark Zhang 
1560d0e312feSLeon Romanovsky static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
1561b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_CMD_GET] = {
1562e5c9469eSLeon Romanovsky 		.doit = nldev_get_doit,
1563b4c598a6SLeon Romanovsky 		.dump = nldev_get_dumpit,
1564b4c598a6SLeon Romanovsky 	},
15650e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
15660e2d00ebSJason Gunthorpe 		.doit = nldev_get_chardev,
15670e2d00ebSJason Gunthorpe 	},
156805d940d3SLeon Romanovsky 	[RDMA_NLDEV_CMD_SET] = {
156905d940d3SLeon Romanovsky 		.doit = nldev_set_doit,
157005d940d3SLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
157105d940d3SLeon Romanovsky 	},
15723856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_NEWLINK] = {
15733856ec4bSSteve Wise 		.doit = nldev_newlink,
15743856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
15753856ec4bSSteve Wise 	},
15763856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_DELLINK] = {
15773856ec4bSSteve Wise 		.doit = nldev_dellink,
15783856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
15793856ec4bSSteve Wise 	},
15807d02f605SLeon Romanovsky 	[RDMA_NLDEV_CMD_PORT_GET] = {
1581c3f66f7bSLeon Romanovsky 		.doit = nldev_port_get_doit,
15827d02f605SLeon Romanovsky 		.dump = nldev_port_get_dumpit,
15837d02f605SLeon Romanovsky 	},
1584bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_GET] = {
1585bf3c5a93SLeon Romanovsky 		.doit = nldev_res_get_doit,
1586bf3c5a93SLeon Romanovsky 		.dump = nldev_res_get_dumpit,
1587bf3c5a93SLeon Romanovsky 	},
1588b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
1589c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_qp_doit,
1590b5fa635aSLeon Romanovsky 		.dump = nldev_res_get_qp_dumpit,
1591b5fa635aSLeon Romanovsky 	},
159200313983SSteve Wise 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
1593c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cm_id_doit,
159400313983SSteve Wise 		.dump = nldev_res_get_cm_id_dumpit,
159500313983SSteve Wise 	},
1596a34fc089SSteve Wise 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
1597c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cq_doit,
1598a34fc089SSteve Wise 		.dump = nldev_res_get_cq_dumpit,
1599a34fc089SSteve Wise 	},
1600fccec5b8SSteve Wise 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
1601c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_mr_doit,
1602fccec5b8SSteve Wise 		.dump = nldev_res_get_mr_dumpit,
1603fccec5b8SSteve Wise 	},
160429cf1351SSteve Wise 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
1605c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_pd_doit,
160629cf1351SSteve Wise 		.dump = nldev_res_get_pd_dumpit,
160729cf1351SSteve Wise 	},
1608cb7e0e13SParav Pandit 	[RDMA_NLDEV_CMD_SYS_GET] = {
16094d7ba8ceSParav Pandit 		.doit = nldev_sys_get_doit,
1610cb7e0e13SParav Pandit 	},
16112b34c558SParav Pandit 	[RDMA_NLDEV_CMD_SYS_SET] = {
16122b34c558SParav Pandit 		.doit = nldev_set_sys_set_doit,
1613b47ae6f8SMark Zhang 	},
1614b47ae6f8SMark Zhang 	[RDMA_NLDEV_CMD_STAT_SET] = {
1615b47ae6f8SMark Zhang 		.doit = nldev_stat_set_doit,
16162b34c558SParav Pandit 		.flags = RDMA_NL_ADMIN_PERM,
16172b34c558SParav Pandit 	},
1618b4c598a6SLeon Romanovsky };
1619b4c598a6SLeon Romanovsky 
/* Register the nldev command table with the RDMA netlink core at boot. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
16246c80b41aSLeon Romanovsky 
/* Unregister the nldev netlink family on module unload. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}
1629e3bf14bdSJason Gunthorpe 
1630e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
1631