xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision 33eb12f2)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
445bd48c18SJason Gunthorpe #include "uverbs.h"
456c80b41aSLeon Romanovsky 
46fb910690SErez Alfasi typedef int (*res_fill_func_t)(struct sk_buff*, bool,
47fb910690SErez Alfasi 			       struct rdma_restrack_entry*, uint32_t);
48fb910690SErez Alfasi 
/*
 * Netlink attribute validation policy for the RDMA_NLDEV family.
 * Array elements are sorted by the netlink attribute name so new
 * entries are easy to place and duplicates easy to spot.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
	[RDMA_NLDEV_ATTR_DEV_DIM]               = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IB_DEVICE_NAME_MAX },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RAW]		= { .type = NLA_BINARY },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]       = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]       = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]  = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
};
150b4c598a6SLeon Romanovsky 
15173937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
15273937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
15373937e8aSSteve Wise {
15473937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
15573937e8aSSteve Wise 		return -EMSGSIZE;
15673937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
15773937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
15873937e8aSSteve Wise 		return -EMSGSIZE;
15973937e8aSSteve Wise 
16073937e8aSSteve Wise 	return 0;
16173937e8aSSteve Wise }
16273937e8aSSteve Wise 
16373937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
16473937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
16573937e8aSSteve Wise 				   u32 value)
16673937e8aSSteve Wise {
16773937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
16873937e8aSSteve Wise 		return -EMSGSIZE;
16973937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
17073937e8aSSteve Wise 		return -EMSGSIZE;
17173937e8aSSteve Wise 
17273937e8aSSteve Wise 	return 0;
17373937e8aSSteve Wise }
17473937e8aSSteve Wise 
17573937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
17673937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
17773937e8aSSteve Wise 				   u64 value)
17873937e8aSSteve Wise {
17973937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
18073937e8aSSteve Wise 		return -EMSGSIZE;
18173937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
18273937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
18373937e8aSSteve Wise 		return -EMSGSIZE;
18473937e8aSSteve Wise 
18573937e8aSSteve Wise 	return 0;
18673937e8aSSteve Wise }
18773937e8aSSteve Wise 
188e1b95ae0SErez Alfasi int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
189e1b95ae0SErez Alfasi 			      const char *str)
190e1b95ae0SErez Alfasi {
191e1b95ae0SErez Alfasi 	if (put_driver_name_print_type(msg, name,
192e1b95ae0SErez Alfasi 				       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
193e1b95ae0SErez Alfasi 		return -EMSGSIZE;
194e1b95ae0SErez Alfasi 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
195e1b95ae0SErez Alfasi 		return -EMSGSIZE;
196e1b95ae0SErez Alfasi 
197e1b95ae0SErez Alfasi 	return 0;
198e1b95ae0SErez Alfasi }
199e1b95ae0SErez Alfasi EXPORT_SYMBOL(rdma_nl_put_driver_string);
200e1b95ae0SErez Alfasi 
/* Emit a driver name/u32 pair rendered as a plain decimal value. */
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);
20773937e8aSSteve Wise 
/* Emit a driver name/u32 pair tagged so userspace prints it in hex. */
int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
21573937e8aSSteve Wise 
/* Emit a driver name/u64 pair rendered as a plain decimal value. */
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);
22273937e8aSSteve Wise 
/* Emit a driver name/u64 pair tagged so userspace prints it in hex. */
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
22973937e8aSSteve Wise 
230c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
231b4c598a6SLeon Romanovsky {
232b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
233b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
234896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
235896de009SJason Gunthorpe 			   dev_name(&device->dev)))
236b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
237c2409810SLeon Romanovsky 
238c2409810SLeon Romanovsky 	return 0;
239c2409810SLeon Romanovsky }
240c2409810SLeon Romanovsky 
/*
 * Fill the per-device attribute set for a RDMA_NLDEV_CMD_GET response:
 * handle, port count, capability flags, FW version, GUIDs, node type,
 * CQ-DIM flag and the link-layer protocol string.
 * Returns 0 on success, -EMSGSIZE if the skb runs out of room.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u8 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* PORT_INDEX here carries the number of ports (highest port). */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * Link type is determined on first port and mlx4 device
	 * which can potentially have two different link type for the same
	 * IB device is considered as better to be avoided in the future,
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}
296b4c598a6SLeon Romanovsky 
/*
 * Fill the per-port attribute set: device handle, port index, and (for IB
 * ports) capability flags, subnet prefix, LIDs and LMC, followed by port
 * state and the associated netdev (only if it lives in @net).
 * Returns 0 on success, a negative errno from ib_query_port(), or
 * -EMSGSIZE when the skb runs out of room.
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	/* LID/LMC/subnet-prefix semantics only exist on IB link layer. */
	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap flag words are packed into one u64 attr. */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/*
	 * Only advertise the netdev when it belongs to the requesting net
	 * namespace; otherwise fall through with ret == 0 from
	 * ib_query_port() above.
	 */
	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);	/* drop ref taken by ib_device_get_netdev() */
	return ret;
}
3547d02f605SLeon Romanovsky 
355bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
356bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
357bf3c5a93SLeon Romanovsky {
358bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
359bf3c5a93SLeon Romanovsky 
360ae0be8deSMichal Kubecek 	entry_attr = nla_nest_start_noflag(msg,
361ae0be8deSMichal Kubecek 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
362bf3c5a93SLeon Romanovsky 	if (!entry_attr)
363bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
364bf3c5a93SLeon Romanovsky 
365bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
366bf3c5a93SLeon Romanovsky 		goto err;
36725a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
36825a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
369bf3c5a93SLeon Romanovsky 		goto err;
370bf3c5a93SLeon Romanovsky 
371bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
372bf3c5a93SLeon Romanovsky 	return 0;
373bf3c5a93SLeon Romanovsky 
374bf3c5a93SLeon Romanovsky err:
375bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
376bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
377bf3c5a93SLeon Romanovsky }
378bf3c5a93SLeon Romanovsky 
/*
 * Build the resource summary for a device: a nested table with one
 * name/count entry per tracked restrack type (pd, cq, qp, cm_id, mr, ctx).
 * The whole table is cancelled if any entry fails to fit.
 * Returns 0 on success or a negative errno (-EMSGSIZE on overflow).
 */
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	/* Indexed by enum rdma_restrack_type; NULL slots are skipped. */
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i);
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
416bf3c5a93SLeon Romanovsky 
/*
 * Emit the owner of a restrack entry: the kernel task name for kernel
 * resources, or the owning PID for user resources.
 * Returns 0 on success or -EMSGSIZE.
 */
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	int err = 0;

	/*
	 * For user resources, userspace should read /proc/PID/comm to get
	 * the name of the task file.
	 */
	if (rdma_is_kernel_res(res)) {
		err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				     res->kern_name);
	} else {
		pid_t pid;

		pid = task_pid_vnr(res->task);
		/*
		 * Task is dead and in zombie state.
		 * There is no need to print PID anymore.
		 */
		if (pid)
			/*
			 * This part is racy, task can be killed and PID will
			 * be zero right here but it is ok, next query won't
			 * return PID. We don't promise real-time reflection
			 * of SW objects.
			 */
			err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
	}

	return err ? -EMSGSIZE : 0;
}
44900313983SSteve Wise 
/*
 * Query a QP and emit its state attributes: remote QPN/RQ PSN (connected
 * types only), SQ PSN, path-migration state (RC/UC/XRC only), QP type and
 * state, then delegate to the driver's fill_res_qp_entry hook if present.
 * Returns 0 on success, the ib_query_qp() errno, or -EMSGSIZE.
 */
static int fill_res_qp_entry_query(struct sk_buff *msg,
				   struct rdma_restrack_entry *res,
				   struct ib_device *dev,
				   struct ib_qp *qp)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	/* A destination QP only exists for connected transports. */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	/* Let the driver append vendor-specific attributes, if it can. */
	if (dev->ops.fill_res_qp_entry)
		return dev->ops.fill_res_qp_entry(msg, qp);
	return 0;

err:	return -EMSGSIZE;
}
49200313983SSteve Wise 
/*
 * Fill one QP restrack entry: port (when already bound), LQPN, owning PD
 * id for user QPs, owner name/PID, then the queried QP state attributes.
 * Returns -EAGAIN to skip QPs on other ports during a per-port dump.
 */
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	int ret;

	if (port && port != qp->port)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	/*
	 * NOTE(review): this nla_put failure returns -EINVAL while every
	 * other attribute-overflow path here returns -EMSGSIZE — confirm
	 * whether the inconsistency is intentional.
	 */
	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
		return -EINVAL;

	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
	if (ret)
		return -EMSGSIZE;

	/* PD ids are only meaningful/exposed for user-created QPs. */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		return -EMSGSIZE;

	ret = fill_res_name_pid(msg, res);
	if (ret)
		return -EMSGSIZE;

	return fill_res_qp_entry_query(msg, res, dev, qp);
}
52165959522SMaor Gottlieb 
52265959522SMaor Gottlieb static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
52365959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
52465959522SMaor Gottlieb {
52565959522SMaor Gottlieb 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
52665959522SMaor Gottlieb 	struct ib_device *dev = qp->device;
52765959522SMaor Gottlieb 
52865959522SMaor Gottlieb 	if (port && port != qp->port)
52965959522SMaor Gottlieb 		return -EAGAIN;
53065959522SMaor Gottlieb 	if (!dev->ops.fill_res_qp_entry_raw)
53165959522SMaor Gottlieb 		return -EINVAL;
53265959522SMaor Gottlieb 	return dev->ops.fill_res_qp_entry_raw(msg, qp);
53365959522SMaor Gottlieb }
53465959522SMaor Gottlieb 
/*
 * Fill one CM ID (rdma_cm connection identifier) restrack entry into the
 * netlink message.  Returns 0 on success (or if the entry is filtered out
 * by the port argument) and -EMSGSIZE when the message runs out of room.
 */
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	/* Note: filtered-out entries return 0 here (not -EAGAIN as the QP
	 * fillers do), so the caller just skips them.
	 */
	if (port && port != cm_id->port_num)
		return 0;

	/* port_num may still be zero for an unbound CM ID; skip it then */
	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	/* QP number/type only exist once a QP has been associated */
	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	/* Addresses are emitted as whole sockaddr_storage blobs, but only
	 * when the address family has actually been set.
	 */
	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	/* Let the driver append its own optional details last */
	if (dev->ops.fill_res_cm_id_entry)
		return dev->ops.fill_res_cm_id_entry(msg, cm_id);
	return 0;

err: return -EMSGSIZE;
}
586b5fa635aSLeon Romanovsky 
587659067b0SLeon Romanovsky static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
588a34fc089SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
589a34fc089SSteve Wise {
590a34fc089SSteve Wise 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
59102da3750SLeon Romanovsky 	struct ib_device *dev = cq->device;
592a34fc089SSteve Wise 
593a34fc089SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
59465959522SMaor Gottlieb 		return -EMSGSIZE;
595a34fc089SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
59625a0ad85SSteve Wise 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
59765959522SMaor Gottlieb 		return -EMSGSIZE;
598a34fc089SSteve Wise 
599a34fc089SSteve Wise 	/* Poll context is only valid for kernel CQs */
600a34fc089SSteve Wise 	if (rdma_is_kernel_res(res) &&
601a34fc089SSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
60265959522SMaor Gottlieb 		return -EMSGSIZE;
603a34fc089SSteve Wise 
604f8fc8cd9SYamin Friedman 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
60565959522SMaor Gottlieb 		return -EMSGSIZE;
606f8fc8cd9SYamin Friedman 
607517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
60865959522SMaor Gottlieb 		return -EMSGSIZE;
609c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
610c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
6115bd48c18SJason Gunthorpe 			cq->uobject->uevent.uobject.context->res.id))
61265959522SMaor Gottlieb 		return -EMSGSIZE;
613517b773eSLeon Romanovsky 
614a34fc089SSteve Wise 	if (fill_res_name_pid(msg, res))
61565959522SMaor Gottlieb 		return -EMSGSIZE;
616a34fc089SSteve Wise 
61765959522SMaor Gottlieb 	return (dev->ops.fill_res_cq_entry) ?
61865959522SMaor Gottlieb 		dev->ops.fill_res_cq_entry(msg, cq) : 0;
61965959522SMaor Gottlieb }
620a34fc089SSteve Wise 
62165959522SMaor Gottlieb static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
62265959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
62365959522SMaor Gottlieb {
62465959522SMaor Gottlieb 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
62565959522SMaor Gottlieb 	struct ib_device *dev = cq->device;
62665959522SMaor Gottlieb 
62765959522SMaor Gottlieb 	if (!dev->ops.fill_res_cq_entry_raw)
62865959522SMaor Gottlieb 		return -EINVAL;
62965959522SMaor Gottlieb 	return dev->ops.fill_res_cq_entry_raw(msg, cq);
630a34fc089SSteve Wise }
631a34fc089SSteve Wise 
632659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
633fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
634fccec5b8SSteve Wise {
635fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
63602da3750SLeon Romanovsky 	struct ib_device *dev = mr->pd->device;
637fccec5b8SSteve Wise 
638659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
639fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
64065959522SMaor Gottlieb 			return -EMSGSIZE;
641fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
64265959522SMaor Gottlieb 			return -EMSGSIZE;
643fccec5b8SSteve Wise 	}
644fccec5b8SSteve Wise 
64525a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
64625a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
64765959522SMaor Gottlieb 		return -EMSGSIZE;
648fccec5b8SSteve Wise 
649517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
65065959522SMaor Gottlieb 		return -EMSGSIZE;
651517b773eSLeon Romanovsky 
652c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
653c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
65465959522SMaor Gottlieb 		return -EMSGSIZE;
655c3d02788SLeon Romanovsky 
656fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
65765959522SMaor Gottlieb 		return -EMSGSIZE;
658fccec5b8SSteve Wise 
65965959522SMaor Gottlieb 	return (dev->ops.fill_res_mr_entry) ?
66065959522SMaor Gottlieb 		       dev->ops.fill_res_mr_entry(msg, mr) :
66165959522SMaor Gottlieb 		       0;
66265959522SMaor Gottlieb }
663fccec5b8SSteve Wise 
66465959522SMaor Gottlieb static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
66565959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
66665959522SMaor Gottlieb {
66765959522SMaor Gottlieb 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
66865959522SMaor Gottlieb 	struct ib_device *dev = mr->pd->device;
66965959522SMaor Gottlieb 
67065959522SMaor Gottlieb 	if (!dev->ops.fill_res_mr_entry_raw)
67165959522SMaor Gottlieb 		return -EINVAL;
67265959522SMaor Gottlieb 	return dev->ops.fill_res_mr_entry_raw(msg, mr);
673fccec5b8SSteve Wise }
674fccec5b8SSteve Wise 
675659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
67629cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
67729cf1351SSteve Wise {
67829cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
67929cf1351SSteve Wise 
680659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
68129cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
68229cf1351SSteve Wise 				pd->local_dma_lkey))
68329cf1351SSteve Wise 			goto err;
68429cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
68529cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
68629cf1351SSteve Wise 				pd->unsafe_global_rkey))
68729cf1351SSteve Wise 			goto err;
68829cf1351SSteve Wise 	}
68929cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
69025a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
69129cf1351SSteve Wise 		goto err;
69229cf1351SSteve Wise 
693517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
694517b773eSLeon Romanovsky 		goto err;
695517b773eSLeon Romanovsky 
696c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
697c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
698c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
699c3d02788SLeon Romanovsky 		goto err;
700c3d02788SLeon Romanovsky 
70124fd6d6fSMaor Gottlieb 	return fill_res_name_pid(msg, res);
70229cf1351SSteve Wise 
703c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
70429cf1351SSteve Wise }
70529cf1351SSteve Wise 
706c4ffee7cSMark Zhang static int fill_stat_counter_mode(struct sk_buff *msg,
707c4ffee7cSMark Zhang 				  struct rdma_counter *counter)
708c4ffee7cSMark Zhang {
709c4ffee7cSMark Zhang 	struct rdma_counter_mode *m = &counter->mode;
710c4ffee7cSMark Zhang 
711c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
712c4ffee7cSMark Zhang 		return -EMSGSIZE;
713c4ffee7cSMark Zhang 
7147c97f3adSMark Zhang 	if (m->mode == RDMA_COUNTER_MODE_AUTO) {
715c4ffee7cSMark Zhang 		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
716c4ffee7cSMark Zhang 		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
717c4ffee7cSMark Zhang 			return -EMSGSIZE;
718c4ffee7cSMark Zhang 
7197c97f3adSMark Zhang 		if ((m->mask & RDMA_COUNTER_MASK_PID) &&
7207c97f3adSMark Zhang 		    fill_res_name_pid(msg, &counter->res))
7217c97f3adSMark Zhang 			return -EMSGSIZE;
7227c97f3adSMark Zhang 	}
7237c97f3adSMark Zhang 
724c4ffee7cSMark Zhang 	return 0;
725c4ffee7cSMark Zhang }
726c4ffee7cSMark Zhang 
727c4ffee7cSMark Zhang static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
728c4ffee7cSMark Zhang {
729c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
730c4ffee7cSMark Zhang 
731c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
732c4ffee7cSMark Zhang 	if (!entry_attr)
733c4ffee7cSMark Zhang 		return -EMSGSIZE;
734c4ffee7cSMark Zhang 
735c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
736c4ffee7cSMark Zhang 		goto err;
737c4ffee7cSMark Zhang 
738c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
739c4ffee7cSMark Zhang 	return 0;
740c4ffee7cSMark Zhang 
741c4ffee7cSMark Zhang err:
742c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
743c4ffee7cSMark Zhang 	return -EMSGSIZE;
744c4ffee7cSMark Zhang }
745c4ffee7cSMark Zhang 
746c4ffee7cSMark Zhang static int fill_stat_counter_qps(struct sk_buff *msg,
747c4ffee7cSMark Zhang 				 struct rdma_counter *counter)
748c4ffee7cSMark Zhang {
749c4ffee7cSMark Zhang 	struct rdma_restrack_entry *res;
750c4ffee7cSMark Zhang 	struct rdma_restrack_root *rt;
751c4ffee7cSMark Zhang 	struct nlattr *table_attr;
752c4ffee7cSMark Zhang 	struct ib_qp *qp = NULL;
753c4ffee7cSMark Zhang 	unsigned long id = 0;
754c4ffee7cSMark Zhang 	int ret = 0;
755c4ffee7cSMark Zhang 
756c4ffee7cSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
757c4ffee7cSMark Zhang 
758c4ffee7cSMark Zhang 	rt = &counter->device->res[RDMA_RESTRACK_QP];
759c4ffee7cSMark Zhang 	xa_lock(&rt->xa);
760c4ffee7cSMark Zhang 	xa_for_each(&rt->xa, id, res) {
761c4ffee7cSMark Zhang 		qp = container_of(res, struct ib_qp, res);
762c4ffee7cSMark Zhang 		if (!qp->counter || (qp->counter->id != counter->id))
763c4ffee7cSMark Zhang 			continue;
764c4ffee7cSMark Zhang 
765c4ffee7cSMark Zhang 		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
766c4ffee7cSMark Zhang 		if (ret)
767c4ffee7cSMark Zhang 			goto err;
768c4ffee7cSMark Zhang 	}
769c4ffee7cSMark Zhang 
770c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
771c4ffee7cSMark Zhang 	nla_nest_end(msg, table_attr);
772c4ffee7cSMark Zhang 	return 0;
773c4ffee7cSMark Zhang 
774c4ffee7cSMark Zhang err:
775c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
776c4ffee7cSMark Zhang 	nla_nest_cancel(msg, table_attr);
777c4ffee7cSMark Zhang 	return ret;
778c4ffee7cSMark Zhang }
779c4ffee7cSMark Zhang 
7804061ff7aSErez Alfasi int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
7814061ff7aSErez Alfasi 				 u64 value)
782c4ffee7cSMark Zhang {
783c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
784c4ffee7cSMark Zhang 
785c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
786c4ffee7cSMark Zhang 	if (!entry_attr)
787c4ffee7cSMark Zhang 		return -EMSGSIZE;
788c4ffee7cSMark Zhang 
789c4ffee7cSMark Zhang 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
790c4ffee7cSMark Zhang 			   name))
791c4ffee7cSMark Zhang 		goto err;
792c4ffee7cSMark Zhang 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
793c4ffee7cSMark Zhang 			      value, RDMA_NLDEV_ATTR_PAD))
794c4ffee7cSMark Zhang 		goto err;
795c4ffee7cSMark Zhang 
796c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
797c4ffee7cSMark Zhang 	return 0;
798c4ffee7cSMark Zhang 
799c4ffee7cSMark Zhang err:
800c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
801c4ffee7cSMark Zhang 	return -EMSGSIZE;
802c4ffee7cSMark Zhang }
8034061ff7aSErez Alfasi EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
8044061ff7aSErez Alfasi 
8054061ff7aSErez Alfasi static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
8064061ff7aSErez Alfasi 			      struct rdma_restrack_entry *res, uint32_t port)
8074061ff7aSErez Alfasi {
8084061ff7aSErez Alfasi 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
8094061ff7aSErez Alfasi 	struct ib_device *dev = mr->pd->device;
8104061ff7aSErez Alfasi 
8114061ff7aSErez Alfasi 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
8124061ff7aSErez Alfasi 		goto err;
8134061ff7aSErez Alfasi 
814f4434529SMaor Gottlieb 	if (dev->ops.fill_stat_mr_entry)
815f4434529SMaor Gottlieb 		return dev->ops.fill_stat_mr_entry(msg, mr);
8164061ff7aSErez Alfasi 	return 0;
8174061ff7aSErez Alfasi 
8184061ff7aSErez Alfasi err:
8194061ff7aSErez Alfasi 	return -EMSGSIZE;
8204061ff7aSErez Alfasi }
821c4ffee7cSMark Zhang 
/*
 * Emit the full hardware-counter table of @counter: one nested
 * name/value entry per counter in counter->stats.
 *
 * Returns 0 on success, -EMSGSIZE when the message runs out of room
 * (the partially built table is cancelled so the message stays valid).
 */
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
					struct rdma_counter *counter)
{
	struct rdma_hw_stats *st = counter->stats;
	struct nlattr *table_attr;
	int i;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < st->num_counters; i++)
		if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
			goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
844c4ffee7cSMark Zhang 
/*
 * Fill one rdma_counter restrack entry: port, counter id, bind mode,
 * the bound-QP table and the hardware counter values.
 *
 * Returns 0 on success, -EAGAIN if the entry does not belong to the
 * requested port, -EMSGSIZE when the message runs out of room.
 */
static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
				  struct rdma_restrack_entry *res,
				  uint32_t port)
{
	struct rdma_counter *counter =
		container_of(res, struct rdma_counter, res);

	if (port && port != counter->port)
		return -EAGAIN;

	/* Dump it even query failed */
	rdma_counter_query_stats(counter);

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
	    fill_stat_counter_mode(msg, counter) ||
	    fill_stat_counter_qps(msg, counter) ||
	    fill_stat_counter_hwcounters(msg, counter))
		return -EMSGSIZE;

	return 0;
}
867c4ffee7cSMark Zhang 
868e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
869e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
870e5c9469eSLeon Romanovsky {
871e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
872e5c9469eSLeon Romanovsky 	struct ib_device *device;
873e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
874e5c9469eSLeon Romanovsky 	u32 index;
875e5c9469eSLeon Romanovsky 	int err;
876e5c9469eSLeon Romanovsky 
8778cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
878e5c9469eSLeon Romanovsky 				     nldev_policy, extack);
879e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
880e5c9469eSLeon Romanovsky 		return -EINVAL;
881e5c9469eSLeon Romanovsky 
882e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
883e5c9469eSLeon Romanovsky 
88437eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
885e5c9469eSLeon Romanovsky 	if (!device)
886e5c9469eSLeon Romanovsky 		return -EINVAL;
887e5c9469eSLeon Romanovsky 
888e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
889f8978bd9SLeon Romanovsky 	if (!msg) {
890f8978bd9SLeon Romanovsky 		err = -ENOMEM;
891f8978bd9SLeon Romanovsky 		goto err;
892f8978bd9SLeon Romanovsky 	}
893e5c9469eSLeon Romanovsky 
894e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
895e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
896e5c9469eSLeon Romanovsky 			0, 0);
897e5c9469eSLeon Romanovsky 
898e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
899f8978bd9SLeon Romanovsky 	if (err)
900f8978bd9SLeon Romanovsky 		goto err_free;
901e5c9469eSLeon Romanovsky 
902e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
903e5c9469eSLeon Romanovsky 
90401b67117SParav Pandit 	ib_device_put(device);
9051d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
906f8978bd9SLeon Romanovsky 
907f8978bd9SLeon Romanovsky err_free:
908f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
909f8978bd9SLeon Romanovsky err:
91001b67117SParav Pandit 	ib_device_put(device);
911f8978bd9SLeon Romanovsky 	return err;
912e5c9469eSLeon Romanovsky }
913e5c9469eSLeon Romanovsky 
/*
 * Handle RDMA_NLDEV_CMD_SET: update one mutable device property.  The
 * attributes are mutually exclusive here — only the first one present
 * (name rename, netns move, DIM toggle, in that order) is acted upon.
 * If none is present the function falls through and returns 0.
 */
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		/* An empty name would make the device unaddressable */
		if (strlen(name) == 0) {
			err = -EINVAL;
			goto done;
		}
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		err = ib_device_set_netns_put(skb, device, ns_fd);
		/* NOTE: ib_device_set_netns_put() appears to consume the
		 * device reference itself (per its _put suffix), hence the
		 * jump past ib_device_put() below — TODO confirm against
		 * its definition.
		 */
		goto put_done;
	}

	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
		u8 use_dim;

		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
		err = ib_device_set_dim(device,  use_dim);
		goto done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
96605d940d3SLeon Romanovsky 
/*
 * Per-device callback for the RDMA_NLDEV_CMD_GET dump: append one
 * device-info message for @device at position @idx, skipping devices
 * already delivered in a previous dump round (cb->args[0] holds the
 * resume position).  Always returns skb->len, per netlink dumpit
 * convention.
 */
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	/* On failure the message is cancelled and idx is NOT advanced, so
	 * the next dump round retries this device.
	 */
	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}
994b4c598a6SLeon Romanovsky 
/* Dump all devices by iterating _nldev_get_dumpit() over each of them. */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
1003b4c598a6SLeon Romanovsky 
1004c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1005c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
1006c3f66f7bSLeon Romanovsky {
1007c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1008c3f66f7bSLeon Romanovsky 	struct ib_device *device;
1009c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
1010c3f66f7bSLeon Romanovsky 	u32 index;
1011c3f66f7bSLeon Romanovsky 	u32 port;
1012c3f66f7bSLeon Romanovsky 	int err;
1013c3f66f7bSLeon Romanovsky 
10148cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1015c3f66f7bSLeon Romanovsky 				     nldev_policy, extack);
1016287683d0SLeon Romanovsky 	if (err ||
1017287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1018287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
1019c3f66f7bSLeon Romanovsky 		return -EINVAL;
1020c3f66f7bSLeon Romanovsky 
1021c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
102237eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1023c3f66f7bSLeon Romanovsky 	if (!device)
1024c3f66f7bSLeon Romanovsky 		return -EINVAL;
1025c3f66f7bSLeon Romanovsky 
1026c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1027f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
1028f8978bd9SLeon Romanovsky 		err = -EINVAL;
1029f8978bd9SLeon Romanovsky 		goto err;
1030f8978bd9SLeon Romanovsky 	}
1031c3f66f7bSLeon Romanovsky 
1032c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1033f8978bd9SLeon Romanovsky 	if (!msg) {
1034f8978bd9SLeon Romanovsky 		err = -ENOMEM;
1035f8978bd9SLeon Romanovsky 		goto err;
1036f8978bd9SLeon Romanovsky 	}
1037c3f66f7bSLeon Romanovsky 
1038c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1039c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1040c3f66f7bSLeon Romanovsky 			0, 0);
1041c3f66f7bSLeon Romanovsky 
10425b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
1043f8978bd9SLeon Romanovsky 	if (err)
1044f8978bd9SLeon Romanovsky 		goto err_free;
1045c3f66f7bSLeon Romanovsky 
1046c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
104701b67117SParav Pandit 	ib_device_put(device);
1048c3f66f7bSLeon Romanovsky 
10491d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1050f8978bd9SLeon Romanovsky 
1051f8978bd9SLeon Romanovsky err_free:
1052f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
1053f8978bd9SLeon Romanovsky err:
105401b67117SParav Pandit 	ib_device_put(device);
1055f8978bd9SLeon Romanovsky 	return err;
1056c3f66f7bSLeon Romanovsky }
1057c3f66f7bSLeon Romanovsky 
/*
 * Handle the RDMA_NLDEV_CMD_PORT_GET dump: emit one port-info message
 * per port of the device selected by RDMA_NLDEV_ATTR_DEV_INDEX.
 * cb->args[0] carries the resume position between dump rounds.
 */
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function returns all information from specific
		 * index. This specific index is taken from the netlink
		 * messages request sent by user and it is available
		 * in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field and it causes
		 * to return everything.
		 *
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		/* Message full: stop here; this port's idx is not advanced,
		 * so the next round resumes from it.
		 */
		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}
11157d02f605SLeon Romanovsky 
1116bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1117bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
1118bf3c5a93SLeon Romanovsky {
1119bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1120bf3c5a93SLeon Romanovsky 	struct ib_device *device;
1121bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
1122bf3c5a93SLeon Romanovsky 	u32 index;
1123bf3c5a93SLeon Romanovsky 	int ret;
1124bf3c5a93SLeon Romanovsky 
11258cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1126bf3c5a93SLeon Romanovsky 				     nldev_policy, extack);
1127bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1128bf3c5a93SLeon Romanovsky 		return -EINVAL;
1129bf3c5a93SLeon Romanovsky 
1130bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
113137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1132bf3c5a93SLeon Romanovsky 	if (!device)
1133bf3c5a93SLeon Romanovsky 		return -EINVAL;
1134bf3c5a93SLeon Romanovsky 
1135bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1136f34727a1SDan Carpenter 	if (!msg) {
1137f34727a1SDan Carpenter 		ret = -ENOMEM;
1138bf3c5a93SLeon Romanovsky 		goto err;
1139f34727a1SDan Carpenter 	}
1140bf3c5a93SLeon Romanovsky 
1141bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1142bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1143bf3c5a93SLeon Romanovsky 			0, 0);
1144bf3c5a93SLeon Romanovsky 
1145bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
1146bf3c5a93SLeon Romanovsky 	if (ret)
1147bf3c5a93SLeon Romanovsky 		goto err_free;
1148bf3c5a93SLeon Romanovsky 
1149bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
115001b67117SParav Pandit 	ib_device_put(device);
11511d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1152bf3c5a93SLeon Romanovsky 
1153bf3c5a93SLeon Romanovsky err_free:
1154bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
1155bf3c5a93SLeon Romanovsky err:
115601b67117SParav Pandit 	ib_device_put(device);
1157bf3c5a93SLeon Romanovsky 	return ret;
1158bf3c5a93SLeon Romanovsky }
1159bf3c5a93SLeon Romanovsky 
/*
 * Per-device callback for the RDMA_NLDEV_CMD_RES_GET dump: append one
 * resource-summary message for @device at position @idx, skipping
 * devices already delivered (cb->args[0] is the resume position).
 * Always returns skb->len, per netlink dumpit convention.
 */
static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	/* On failure idx is NOT advanced, so the next round retries */
	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}
1187bf3c5a93SLeon Romanovsky 
1188bf3c5a93SLeon Romanovsky static int nldev_res_get_dumpit(struct sk_buff *skb,
1189bf3c5a93SLeon Romanovsky 				struct netlink_callback *cb)
1190bf3c5a93SLeon Romanovsky {
1191bf3c5a93SLeon Romanovsky 	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
1192bf3c5a93SLeon Romanovsky }
1193bf3c5a93SLeon Romanovsky 
/*
 * Describes how one restrack resource type is rendered to netlink:
 * which attribute nests the per-device table, which attribute wraps a
 * single dumped object, and which attribute carries the object ID in
 * doit (single-object) requests.
 */
struct nldev_fill_res_entry {
	enum rdma_nldev_attr nldev_attr;
	u8 flags;	/* bitmask of enum nldev_res_flags */
	u32 entry;	/* attribute nesting one dumped object */
	u32 id;		/* attribute holding the object ID for doit lookups */
};

enum nldev_res_flags {
	/* Object is tracked per device; doit rejects a port index for it. */
	NLDEV_PER_DEV = 1 << 0,
};
1204d12ff624SSteve Wise 
/*
 * Netlink attribute mapping for each restrack resource type, indexed by
 * enum rdma_restrack_type.  Entries flagged NLDEV_PER_DEV (CQ, MR, PD)
 * may not be queried with a port index in res_get_common_doit().
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
};
1240d12ff624SSteve Wise 
/*
 * Common handler for the per-type RES_*_GET doit requests: look up one
 * restrack object by device index + type-specific object ID and send a
 * unicast reply whose body is produced by @fill_func.
 *
 * Returns 0 on success (reply sent) or a negative errno; every error
 * path releases the device reference and, once taken, the restrack ref
 * and the reply skb (see the cascading labels at the end).
 */
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type,
			       res_fill_func_t fill_func)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	/* Device index and the type's ID attribute are both mandatory. */
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Per-device resources (NLDEV_PER_DEV) must come without a port
	 * index; all other resource types require one.
	 */
	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err_get;
	}

	/* Echo the request opcode back in the reply's message type. */
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NL_GET_OP(nlh->nlmsg_type)),
			0, 0);

	if (fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	/* Fill callbacks may expose extra detail to privileged requesters. */
	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);

	ret = fill_func(msg, has_cap_net_admin, res, port);
	if (ret)
		goto err_free;

	rdma_restrack_put(res);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}
1321c5dfe0eaSLeon Romanovsky 
/*
 * Common handler for the per-type RES_*_GET dumpit requests: walk the
 * device's restrack xarray for @res_type and nest one entry per object,
 * produced by @fill_func, resuming from cb->args[0] on each dump call.
 */
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type,
				 res_fill_func_t fill_func)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * if it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	/* Reply type mirrors the request opcode; NLM_F_MULTI marks a dump. */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if the skip ahead is something common this loop should
	 * use xas_for_each & xas_pause to optimize, we can have a lot of
	 * objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		/* Skip entries already dumped, or ones we failed to pin. */
		if (idx < start || !rdma_restrack_get(res))
			goto next;

		/* Object is pinned; drop the xa lock while filling
		 * (reacquired at the "again" label below). */
		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start_noflag(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fill_func(skb, has_cap_net_admin, res, port);

		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			/* -EMSGSIZE: skb is full, finish this message and
			 * let the next dump call continue from idx. */
			if (ret == -EMSGSIZE)
				goto msg_full;
			/* -EAGAIN: skip this object and keep iterating. */
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}
1454b5fa635aSLeon Romanovsky 
/*
 * Generate the doit/dumpit handler pair for one restrack resource type,
 * delegating to the common helpers with the matching fill_res_*_entry
 * callback.  "name" selects the callback, "type" the restrack table.
 */
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type,                    \
					     fill_res_##name##_entry);         \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type,             \
					   fill_res_##name##_entry);           \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
147929cf1351SSteve Wise 
14803856ec4bSSteve Wise static LIST_HEAD(link_ops);
14813856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem);
14823856ec4bSSteve Wise 
14833856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
14843856ec4bSSteve Wise {
14853856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
14863856ec4bSSteve Wise 
14873856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
14883856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
14893856ec4bSSteve Wise 			goto out;
14903856ec4bSSteve Wise 	}
14913856ec4bSSteve Wise 	ops = NULL;
14923856ec4bSSteve Wise out:
14933856ec4bSSteve Wise 	return ops;
14943856ec4bSSteve Wise }
14953856ec4bSSteve Wise 
14963856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
14973856ec4bSSteve Wise {
14983856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1499afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
15003856ec4bSSteve Wise 		goto out;
15013856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
15023856ec4bSSteve Wise out:
15033856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
15043856ec4bSSteve Wise }
15053856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
15063856ec4bSSteve Wise 
/* Remove @ops from the link type registry; pairs with rdma_link_register(). */
void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
15143856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_NEWLINK handler: create an RDMA device of the requested
 * link type on top of the named net_device, autoloading the provider
 * module ("rdma-link-<type>") when the type is not yet registered.
 */
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	/* Empty names and names containing '%' are rejected outright. */
	if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
		return -EINVAL;

	nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	/* Resolve the netdev within the requester's net namespace. */
	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	/*
	 * Unknown type: drop the lock, try to autoload the provider
	 * module, then look the type up once more.
	 */
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}
15613856ec4bSSteve Wise 
15623856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
15633856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
15643856ec4bSSteve Wise {
15653856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
15663856ec4bSSteve Wise 	struct ib_device *device;
15673856ec4bSSteve Wise 	u32 index;
15683856ec4bSSteve Wise 	int err;
15693856ec4bSSteve Wise 
15708cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
15713856ec4bSSteve Wise 				     nldev_policy, extack);
15723856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
15733856ec4bSSteve Wise 		return -EINVAL;
15743856ec4bSSteve Wise 
15753856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
157637eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
15773856ec4bSSteve Wise 	if (!device)
15783856ec4bSSteve Wise 		return -EINVAL;
15793856ec4bSSteve Wise 
15803856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
15813856ec4bSSteve Wise 		ib_device_put(device);
15823856ec4bSSteve Wise 		return -EINVAL;
15833856ec4bSSteve Wise 	}
15843856ec4bSSteve Wise 
15853856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
15863856ec4bSSteve Wise 	return 0;
15873856ec4bSSteve Wise }
15883856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_GET_CHARDEV handler: report the char device (dev_t, ABI
 * version and name) that the named client exposes, optionally scoped to
 * one device and port.  A port index without a device index is invalid.
 */
static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			/* -1 appears to mean "no specific port" — confirm
			 * against ib_get_client_nl_info(). */
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);

	data.nl_msg = msg;
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	/* Encode the char device number, ABI and name into the reply. */
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	/* Drop the device reference taken by ib_get_client_nl_info(). */
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}
16720e2d00ebSJason Gunthorpe 
16734d7ba8ceSParav Pandit static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
16744d7ba8ceSParav Pandit 			      struct netlink_ext_ack *extack)
1675cb7e0e13SParav Pandit {
1676cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
16774d7ba8ceSParav Pandit 	struct sk_buff *msg;
1678cb7e0e13SParav Pandit 	int err;
1679cb7e0e13SParav Pandit 
16804d7ba8ceSParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
16814d7ba8ceSParav Pandit 			  nldev_policy, extack);
1682cb7e0e13SParav Pandit 	if (err)
1683cb7e0e13SParav Pandit 		return err;
1684cb7e0e13SParav Pandit 
16854d7ba8ceSParav Pandit 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
16864d7ba8ceSParav Pandit 	if (!msg)
16874d7ba8ceSParav Pandit 		return -ENOMEM;
16884d7ba8ceSParav Pandit 
16894d7ba8ceSParav Pandit 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1690cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1691cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1692cb7e0e13SParav Pandit 			0, 0);
1693cb7e0e13SParav Pandit 
16944d7ba8ceSParav Pandit 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1695cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1696cb7e0e13SParav Pandit 	if (err) {
16974d7ba8ceSParav Pandit 		nlmsg_free(msg);
1698cb7e0e13SParav Pandit 		return err;
1699cb7e0e13SParav Pandit 	}
17004d7ba8ceSParav Pandit 	nlmsg_end(msg, nlh);
17011d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1702cb7e0e13SParav Pandit }
1703cb7e0e13SParav Pandit 
17042b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
17052b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
17062b34c558SParav Pandit {
17072b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
17082b34c558SParav Pandit 	u8 enable;
17092b34c558SParav Pandit 	int err;
17102b34c558SParav Pandit 
17112b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
17122b34c558SParav Pandit 			  nldev_policy, extack);
17132b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
17142b34c558SParav Pandit 		return -EINVAL;
17152b34c558SParav Pandit 
17162b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
17172b34c558SParav Pandit 	/* Only 0 and 1 are supported */
17182b34c558SParav Pandit 	if (enable > 1)
17192b34c558SParav Pandit 		return -EINVAL;
17202b34c558SParav Pandit 
17212b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
17222b34c558SParav Pandit 	return err;
17232b34c558SParav Pandit }
17242b34c558SParav Pandit 
1725b47ae6f8SMark Zhang static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1726b47ae6f8SMark Zhang 			       struct netlink_ext_ack *extack)
1727b47ae6f8SMark Zhang {
1728b389327dSMark Zhang 	u32 index, port, mode, mask = 0, qpn, cntn = 0;
1729b47ae6f8SMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1730b47ae6f8SMark Zhang 	struct ib_device *device;
1731b47ae6f8SMark Zhang 	struct sk_buff *msg;
1732b47ae6f8SMark Zhang 	int ret;
1733b47ae6f8SMark Zhang 
1734b47ae6f8SMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1735b47ae6f8SMark Zhang 			  nldev_policy, extack);
1736b47ae6f8SMark Zhang 	/* Currently only counter for QP is supported */
1737b47ae6f8SMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1738b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1739b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
1740b47ae6f8SMark Zhang 		return -EINVAL;
1741b47ae6f8SMark Zhang 
1742b47ae6f8SMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1743b47ae6f8SMark Zhang 		return -EINVAL;
1744b47ae6f8SMark Zhang 
1745b47ae6f8SMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1746b47ae6f8SMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1747b47ae6f8SMark Zhang 	if (!device)
1748b47ae6f8SMark Zhang 		return -EINVAL;
1749b47ae6f8SMark Zhang 
1750b47ae6f8SMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1751b47ae6f8SMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1752b47ae6f8SMark Zhang 		ret = -EINVAL;
1753b47ae6f8SMark Zhang 		goto err;
1754b47ae6f8SMark Zhang 	}
1755b47ae6f8SMark Zhang 
1756b47ae6f8SMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1757b47ae6f8SMark Zhang 	if (!msg) {
1758b47ae6f8SMark Zhang 		ret = -ENOMEM;
1759b47ae6f8SMark Zhang 		goto err;
1760b47ae6f8SMark Zhang 	}
1761b47ae6f8SMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1762b47ae6f8SMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1763b47ae6f8SMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
1764b47ae6f8SMark Zhang 			0, 0);
1765b47ae6f8SMark Zhang 
1766b47ae6f8SMark Zhang 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
1767b389327dSMark Zhang 	if (mode == RDMA_COUNTER_MODE_AUTO) {
1768b47ae6f8SMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
1769b389327dSMark Zhang 			mask = nla_get_u32(
1770b389327dSMark Zhang 				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
1771*33eb12f2SPatrisious Haddad 		ret = rdma_counter_set_auto_mode(device, port, mask, extack);
1772b47ae6f8SMark Zhang 		if (ret)
1773b47ae6f8SMark Zhang 			goto err_msg;
1774b389327dSMark Zhang 	} else {
177578f34a16SMark Zhang 		if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
177678f34a16SMark Zhang 			goto err_msg;
1777b389327dSMark Zhang 		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
1778b389327dSMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
1779b389327dSMark Zhang 			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
1780b389327dSMark Zhang 			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
1781b389327dSMark Zhang 		} else {
1782b389327dSMark Zhang 			ret = rdma_counter_bind_qpn_alloc(device, port,
1783b389327dSMark Zhang 							  qpn, &cntn);
1784b389327dSMark Zhang 		}
1785b389327dSMark Zhang 		if (ret)
1786b47ae6f8SMark Zhang 			goto err_msg;
1787b389327dSMark Zhang 
1788b389327dSMark Zhang 		if (fill_nldev_handle(msg, device) ||
1789b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1790b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
1791b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
1792b389327dSMark Zhang 			ret = -EMSGSIZE;
1793b389327dSMark Zhang 			goto err_fill;
1794b389327dSMark Zhang 		}
1795b47ae6f8SMark Zhang 	}
1796b47ae6f8SMark Zhang 
1797b47ae6f8SMark Zhang 	nlmsg_end(msg, nlh);
1798b47ae6f8SMark Zhang 	ib_device_put(device);
17991d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1800b47ae6f8SMark Zhang 
1801b389327dSMark Zhang err_fill:
1802b389327dSMark Zhang 	rdma_counter_unbind_qpn(device, port, qpn, cntn);
1803b47ae6f8SMark Zhang err_msg:
1804b47ae6f8SMark Zhang 	nlmsg_free(msg);
1805b47ae6f8SMark Zhang err:
1806b47ae6f8SMark Zhang 	ib_device_put(device);
1807b47ae6f8SMark Zhang 	return ret;
1808b47ae6f8SMark Zhang }
1809b47ae6f8SMark Zhang 
1810b389327dSMark Zhang static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1811b389327dSMark Zhang 			       struct netlink_ext_ack *extack)
1812b389327dSMark Zhang {
1813b389327dSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1814b389327dSMark Zhang 	struct ib_device *device;
1815b389327dSMark Zhang 	struct sk_buff *msg;
1816b389327dSMark Zhang 	u32 index, port, qpn, cntn;
1817b389327dSMark Zhang 	int ret;
1818b389327dSMark Zhang 
1819b389327dSMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1820b389327dSMark Zhang 			  nldev_policy, extack);
1821b389327dSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1822b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
1823b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
1824b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
1825b389327dSMark Zhang 		return -EINVAL;
1826b389327dSMark Zhang 
1827b389327dSMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1828b389327dSMark Zhang 		return -EINVAL;
1829b389327dSMark Zhang 
1830b389327dSMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1831b389327dSMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1832b389327dSMark Zhang 	if (!device)
1833b389327dSMark Zhang 		return -EINVAL;
1834b389327dSMark Zhang 
1835b389327dSMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1836b389327dSMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1837b389327dSMark Zhang 		ret = -EINVAL;
1838b389327dSMark Zhang 		goto err;
1839b389327dSMark Zhang 	}
1840b389327dSMark Zhang 
1841b389327dSMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1842b389327dSMark Zhang 	if (!msg) {
1843b389327dSMark Zhang 		ret = -ENOMEM;
1844b389327dSMark Zhang 		goto err;
1845b389327dSMark Zhang 	}
1846b389327dSMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1847b389327dSMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1848b389327dSMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
1849b389327dSMark Zhang 			0, 0);
1850b389327dSMark Zhang 
1851b389327dSMark Zhang 	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
1852b389327dSMark Zhang 	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
1853b389327dSMark Zhang 	if (fill_nldev_handle(msg, device) ||
1854b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1855b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
1856b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
1857b389327dSMark Zhang 		ret = -EMSGSIZE;
1858b389327dSMark Zhang 		goto err_fill;
1859b389327dSMark Zhang 	}
1860b389327dSMark Zhang 
1861594e6c5dSLeon Romanovsky 	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
1862594e6c5dSLeon Romanovsky 	if (ret)
1863594e6c5dSLeon Romanovsky 		goto err_fill;
1864594e6c5dSLeon Romanovsky 
1865b389327dSMark Zhang 	nlmsg_end(msg, nlh);
1866b389327dSMark Zhang 	ib_device_put(device);
18671d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1868b389327dSMark Zhang 
1869b389327dSMark Zhang err_fill:
1870b389327dSMark Zhang 	nlmsg_free(msg);
1871b389327dSMark Zhang err:
1872b389327dSMark Zhang 	ib_device_put(device);
1873b389327dSMark Zhang 	return ret;
1874b389327dSMark Zhang }
1875b389327dSMark Zhang 
18766e7be47aSMark Zhang static int stat_get_doit_default_counter(struct sk_buff *skb,
18776e7be47aSMark Zhang 					 struct nlmsghdr *nlh,
18786e7be47aSMark Zhang 					 struct netlink_ext_ack *extack,
18796e7be47aSMark Zhang 					 struct nlattr *tb[])
18806e7be47aSMark Zhang {
18816e7be47aSMark Zhang 	struct rdma_hw_stats *stats;
18826e7be47aSMark Zhang 	struct nlattr *table_attr;
18836e7be47aSMark Zhang 	struct ib_device *device;
18846e7be47aSMark Zhang 	int ret, num_cnts, i;
18856e7be47aSMark Zhang 	struct sk_buff *msg;
18866e7be47aSMark Zhang 	u32 index, port;
18876e7be47aSMark Zhang 	u64 v;
18886e7be47aSMark Zhang 
18896e7be47aSMark Zhang 	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
18906e7be47aSMark Zhang 		return -EINVAL;
18916e7be47aSMark Zhang 
18926e7be47aSMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
18936e7be47aSMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
18946e7be47aSMark Zhang 	if (!device)
18956e7be47aSMark Zhang 		return -EINVAL;
18966e7be47aSMark Zhang 
18976e7be47aSMark Zhang 	if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
18986e7be47aSMark Zhang 		ret = -EINVAL;
18996e7be47aSMark Zhang 		goto err;
19006e7be47aSMark Zhang 	}
19016e7be47aSMark Zhang 
19026e7be47aSMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
19036e7be47aSMark Zhang 	if (!rdma_is_port_valid(device, port)) {
19046e7be47aSMark Zhang 		ret = -EINVAL;
19056e7be47aSMark Zhang 		goto err;
19066e7be47aSMark Zhang 	}
19076e7be47aSMark Zhang 
19086e7be47aSMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
19096e7be47aSMark Zhang 	if (!msg) {
19106e7be47aSMark Zhang 		ret = -ENOMEM;
19116e7be47aSMark Zhang 		goto err;
19126e7be47aSMark Zhang 	}
19136e7be47aSMark Zhang 
19146e7be47aSMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
19156e7be47aSMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
19166e7be47aSMark Zhang 					 RDMA_NLDEV_CMD_STAT_GET),
19176e7be47aSMark Zhang 			0, 0);
19186e7be47aSMark Zhang 
19196e7be47aSMark Zhang 	if (fill_nldev_handle(msg, device) ||
19206e7be47aSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
19216e7be47aSMark Zhang 		ret = -EMSGSIZE;
19226e7be47aSMark Zhang 		goto err_msg;
19236e7be47aSMark Zhang 	}
19246e7be47aSMark Zhang 
19256e7be47aSMark Zhang 	stats = device->port_data ? device->port_data[port].hw_stats : NULL;
19266e7be47aSMark Zhang 	if (stats == NULL) {
19276e7be47aSMark Zhang 		ret = -EINVAL;
19286e7be47aSMark Zhang 		goto err_msg;
19296e7be47aSMark Zhang 	}
19306e7be47aSMark Zhang 	mutex_lock(&stats->lock);
19316e7be47aSMark Zhang 
19326e7be47aSMark Zhang 	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
19336e7be47aSMark Zhang 	if (num_cnts < 0) {
19346e7be47aSMark Zhang 		ret = -EINVAL;
19356e7be47aSMark Zhang 		goto err_stats;
19366e7be47aSMark Zhang 	}
19376e7be47aSMark Zhang 
19386e7be47aSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
19396e7be47aSMark Zhang 	if (!table_attr) {
19406e7be47aSMark Zhang 		ret = -EMSGSIZE;
19416e7be47aSMark Zhang 		goto err_stats;
19426e7be47aSMark Zhang 	}
19436e7be47aSMark Zhang 	for (i = 0; i < num_cnts; i++) {
19446e7be47aSMark Zhang 		v = stats->value[i] +
19456e7be47aSMark Zhang 			rdma_counter_get_hwstat_value(device, port, i);
19464061ff7aSErez Alfasi 		if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
19476e7be47aSMark Zhang 			ret = -EMSGSIZE;
19486e7be47aSMark Zhang 			goto err_table;
19496e7be47aSMark Zhang 		}
19506e7be47aSMark Zhang 	}
19516e7be47aSMark Zhang 	nla_nest_end(msg, table_attr);
19526e7be47aSMark Zhang 
19536e7be47aSMark Zhang 	mutex_unlock(&stats->lock);
19546e7be47aSMark Zhang 	nlmsg_end(msg, nlh);
19556e7be47aSMark Zhang 	ib_device_put(device);
19561d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
19576e7be47aSMark Zhang 
19586e7be47aSMark Zhang err_table:
19596e7be47aSMark Zhang 	nla_nest_cancel(msg, table_attr);
19606e7be47aSMark Zhang err_stats:
19616e7be47aSMark Zhang 	mutex_unlock(&stats->lock);
19626e7be47aSMark Zhang err_msg:
19636e7be47aSMark Zhang 	nlmsg_free(msg);
19646e7be47aSMark Zhang err:
19656e7be47aSMark Zhang 	ib_device_put(device);
19666e7be47aSMark Zhang 	return ret;
19676e7be47aSMark Zhang }
19686e7be47aSMark Zhang 
196983c2c1fcSMark Zhang static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
197083c2c1fcSMark Zhang 			    struct netlink_ext_ack *extack, struct nlattr *tb[])
197183c2c1fcSMark Zhang 
197283c2c1fcSMark Zhang {
197383c2c1fcSMark Zhang 	static enum rdma_nl_counter_mode mode;
197483c2c1fcSMark Zhang 	static enum rdma_nl_counter_mask mask;
197583c2c1fcSMark Zhang 	struct ib_device *device;
197683c2c1fcSMark Zhang 	struct sk_buff *msg;
197783c2c1fcSMark Zhang 	u32 index, port;
197883c2c1fcSMark Zhang 	int ret;
197983c2c1fcSMark Zhang 
198083c2c1fcSMark Zhang 	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
198183c2c1fcSMark Zhang 		return nldev_res_get_counter_doit(skb, nlh, extack);
198283c2c1fcSMark Zhang 
198383c2c1fcSMark Zhang 	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
198483c2c1fcSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
198583c2c1fcSMark Zhang 		return -EINVAL;
198683c2c1fcSMark Zhang 
198783c2c1fcSMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
198883c2c1fcSMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
198983c2c1fcSMark Zhang 	if (!device)
199083c2c1fcSMark Zhang 		return -EINVAL;
199183c2c1fcSMark Zhang 
199283c2c1fcSMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
199383c2c1fcSMark Zhang 	if (!rdma_is_port_valid(device, port)) {
199483c2c1fcSMark Zhang 		ret = -EINVAL;
199583c2c1fcSMark Zhang 		goto err;
199683c2c1fcSMark Zhang 	}
199783c2c1fcSMark Zhang 
199883c2c1fcSMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
199983c2c1fcSMark Zhang 	if (!msg) {
200083c2c1fcSMark Zhang 		ret = -ENOMEM;
200183c2c1fcSMark Zhang 		goto err;
200283c2c1fcSMark Zhang 	}
200383c2c1fcSMark Zhang 
200483c2c1fcSMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
200583c2c1fcSMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
200683c2c1fcSMark Zhang 					 RDMA_NLDEV_CMD_STAT_GET),
200783c2c1fcSMark Zhang 			0, 0);
200883c2c1fcSMark Zhang 
200983c2c1fcSMark Zhang 	ret = rdma_counter_get_mode(device, port, &mode, &mask);
201083c2c1fcSMark Zhang 	if (ret)
201183c2c1fcSMark Zhang 		goto err_msg;
201283c2c1fcSMark Zhang 
201383c2c1fcSMark Zhang 	if (fill_nldev_handle(msg, device) ||
201483c2c1fcSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
2015932727c5SDan Carpenter 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
2016932727c5SDan Carpenter 		ret = -EMSGSIZE;
201783c2c1fcSMark Zhang 		goto err_msg;
2018932727c5SDan Carpenter 	}
201983c2c1fcSMark Zhang 
202083c2c1fcSMark Zhang 	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
2021932727c5SDan Carpenter 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
2022932727c5SDan Carpenter 		ret = -EMSGSIZE;
202383c2c1fcSMark Zhang 		goto err_msg;
2024932727c5SDan Carpenter 	}
202583c2c1fcSMark Zhang 
202683c2c1fcSMark Zhang 	nlmsg_end(msg, nlh);
202783c2c1fcSMark Zhang 	ib_device_put(device);
20281d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
202983c2c1fcSMark Zhang 
203083c2c1fcSMark Zhang err_msg:
203183c2c1fcSMark Zhang 	nlmsg_free(msg);
203283c2c1fcSMark Zhang err:
203383c2c1fcSMark Zhang 	ib_device_put(device);
203483c2c1fcSMark Zhang 	return ret;
203583c2c1fcSMark Zhang }
203683c2c1fcSMark Zhang 
2037c4ffee7cSMark Zhang static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2038c4ffee7cSMark Zhang 			       struct netlink_ext_ack *extack)
2039c4ffee7cSMark Zhang {
2040c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2041c4ffee7cSMark Zhang 	int ret;
2042c4ffee7cSMark Zhang 
2043c4ffee7cSMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2044c4ffee7cSMark Zhang 			  nldev_policy, extack);
20456e7be47aSMark Zhang 	if (ret)
2046c4ffee7cSMark Zhang 		return -EINVAL;
2047c4ffee7cSMark Zhang 
20486e7be47aSMark Zhang 	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
20496e7be47aSMark Zhang 		return stat_get_doit_default_counter(skb, nlh, extack, tb);
20506e7be47aSMark Zhang 
2051c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2052c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
205383c2c1fcSMark Zhang 		ret = stat_get_doit_qp(skb, nlh, extack, tb);
2054c4ffee7cSMark Zhang 		break;
20554061ff7aSErez Alfasi 	case RDMA_NLDEV_ATTR_RES_MR:
20564061ff7aSErez Alfasi 		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
20574061ff7aSErez Alfasi 					  fill_stat_mr_entry);
20584061ff7aSErez Alfasi 		break;
2059c4ffee7cSMark Zhang 	default:
2060c4ffee7cSMark Zhang 		ret = -EINVAL;
2061c4ffee7cSMark Zhang 		break;
2062c4ffee7cSMark Zhang 	}
2063c4ffee7cSMark Zhang 
2064c4ffee7cSMark Zhang 	return ret;
2065c4ffee7cSMark Zhang }
2066c4ffee7cSMark Zhang 
2067c4ffee7cSMark Zhang static int nldev_stat_get_dumpit(struct sk_buff *skb,
2068c4ffee7cSMark Zhang 				 struct netlink_callback *cb)
2069c4ffee7cSMark Zhang {
2070c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2071c4ffee7cSMark Zhang 	int ret;
2072c4ffee7cSMark Zhang 
2073c4ffee7cSMark Zhang 	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2074c4ffee7cSMark Zhang 			  nldev_policy, NULL);
2075c4ffee7cSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
2076c4ffee7cSMark Zhang 		return -EINVAL;
2077c4ffee7cSMark Zhang 
2078c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2079c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
2080c4ffee7cSMark Zhang 		ret = nldev_res_get_counter_dumpit(skb, cb);
2081c4ffee7cSMark Zhang 		break;
20824061ff7aSErez Alfasi 	case RDMA_NLDEV_ATTR_RES_MR:
20834061ff7aSErez Alfasi 		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
20844061ff7aSErez Alfasi 					    fill_stat_mr_entry);
20854061ff7aSErez Alfasi 		break;
2086c4ffee7cSMark Zhang 	default:
2087c4ffee7cSMark Zhang 		ret = -EINVAL;
2088c4ffee7cSMark Zhang 		break;
2089c4ffee7cSMark Zhang 	}
2090c4ffee7cSMark Zhang 
2091c4ffee7cSMark Zhang 	return ret;
2092c4ffee7cSMark Zhang }
2093c4ffee7cSMark Zhang 
2094d0e312feSLeon Romanovsky static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
2095b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_CMD_GET] = {
2096e5c9469eSLeon Romanovsky 		.doit = nldev_get_doit,
2097b4c598a6SLeon Romanovsky 		.dump = nldev_get_dumpit,
2098b4c598a6SLeon Romanovsky 	},
20990e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
21000e2d00ebSJason Gunthorpe 		.doit = nldev_get_chardev,
21010e2d00ebSJason Gunthorpe 	},
210205d940d3SLeon Romanovsky 	[RDMA_NLDEV_CMD_SET] = {
210305d940d3SLeon Romanovsky 		.doit = nldev_set_doit,
210405d940d3SLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
210505d940d3SLeon Romanovsky 	},
21063856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_NEWLINK] = {
21073856ec4bSSteve Wise 		.doit = nldev_newlink,
21083856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
21093856ec4bSSteve Wise 	},
21103856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_DELLINK] = {
21113856ec4bSSteve Wise 		.doit = nldev_dellink,
21123856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
21133856ec4bSSteve Wise 	},
21147d02f605SLeon Romanovsky 	[RDMA_NLDEV_CMD_PORT_GET] = {
2115c3f66f7bSLeon Romanovsky 		.doit = nldev_port_get_doit,
21167d02f605SLeon Romanovsky 		.dump = nldev_port_get_dumpit,
21177d02f605SLeon Romanovsky 	},
2118bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_GET] = {
2119bf3c5a93SLeon Romanovsky 		.doit = nldev_res_get_doit,
2120bf3c5a93SLeon Romanovsky 		.dump = nldev_res_get_dumpit,
2121bf3c5a93SLeon Romanovsky 	},
2122b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
2123c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_qp_doit,
2124b5fa635aSLeon Romanovsky 		.dump = nldev_res_get_qp_dumpit,
2125b5fa635aSLeon Romanovsky 	},
212600313983SSteve Wise 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
2127c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cm_id_doit,
212800313983SSteve Wise 		.dump = nldev_res_get_cm_id_dumpit,
212900313983SSteve Wise 	},
2130a34fc089SSteve Wise 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
2131c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cq_doit,
2132a34fc089SSteve Wise 		.dump = nldev_res_get_cq_dumpit,
2133a34fc089SSteve Wise 	},
2134fccec5b8SSteve Wise 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
2135c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_mr_doit,
2136fccec5b8SSteve Wise 		.dump = nldev_res_get_mr_dumpit,
2137fccec5b8SSteve Wise 	},
213829cf1351SSteve Wise 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
2139c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_pd_doit,
214029cf1351SSteve Wise 		.dump = nldev_res_get_pd_dumpit,
214129cf1351SSteve Wise 	},
2142cb7e0e13SParav Pandit 	[RDMA_NLDEV_CMD_SYS_GET] = {
21434d7ba8ceSParav Pandit 		.doit = nldev_sys_get_doit,
2144cb7e0e13SParav Pandit 	},
21452b34c558SParav Pandit 	[RDMA_NLDEV_CMD_SYS_SET] = {
21462b34c558SParav Pandit 		.doit = nldev_set_sys_set_doit,
2147b47ae6f8SMark Zhang 	},
2148b47ae6f8SMark Zhang 	[RDMA_NLDEV_CMD_STAT_SET] = {
2149b47ae6f8SMark Zhang 		.doit = nldev_stat_set_doit,
21502b34c558SParav Pandit 		.flags = RDMA_NL_ADMIN_PERM,
21512b34c558SParav Pandit 	},
2152c4ffee7cSMark Zhang 	[RDMA_NLDEV_CMD_STAT_GET] = {
2153c4ffee7cSMark Zhang 		.doit = nldev_stat_get_doit,
2154c4ffee7cSMark Zhang 		.dump = nldev_stat_get_dumpit,
2155c4ffee7cSMark Zhang 	},
2156b389327dSMark Zhang 	[RDMA_NLDEV_CMD_STAT_DEL] = {
2157b389327dSMark Zhang 		.doit = nldev_stat_del_doit,
2158b389327dSMark Zhang 		.flags = RDMA_NL_ADMIN_PERM,
2159b389327dSMark Zhang 	},
216065959522SMaor Gottlieb 	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
216165959522SMaor Gottlieb 		.doit = nldev_res_get_qp_raw_doit,
216265959522SMaor Gottlieb 		.dump = nldev_res_get_qp_raw_dumpit,
216365959522SMaor Gottlieb 		.flags = RDMA_NL_ADMIN_PERM,
216465959522SMaor Gottlieb 	},
216565959522SMaor Gottlieb 	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
216665959522SMaor Gottlieb 		.doit = nldev_res_get_cq_raw_doit,
216765959522SMaor Gottlieb 		.dump = nldev_res_get_cq_raw_dumpit,
216865959522SMaor Gottlieb 		.flags = RDMA_NL_ADMIN_PERM,
216965959522SMaor Gottlieb 	},
217065959522SMaor Gottlieb 	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
217165959522SMaor Gottlieb 		.doit = nldev_res_get_mr_raw_doit,
217265959522SMaor Gottlieb 		.dump = nldev_res_get_mr_raw_dumpit,
217365959522SMaor Gottlieb 		.flags = RDMA_NL_ADMIN_PERM,
217465959522SMaor Gottlieb 	},
2175b4c598a6SLeon Romanovsky };
2176b4c598a6SLeon Romanovsky 
21776c80b41aSLeon Romanovsky void __init nldev_init(void)
21786c80b41aSLeon Romanovsky {
2179b4c598a6SLeon Romanovsky 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
21806c80b41aSLeon Romanovsky }
21816c80b41aSLeon Romanovsky 
21826c80b41aSLeon Romanovsky void __exit nldev_exit(void)
21836c80b41aSLeon Romanovsky {
21846c80b41aSLeon Romanovsky 	rdma_nl_unregister(RDMA_NL_NLDEV);
21856c80b41aSLeon Romanovsky }
2186e3bf14bdSJason Gunthorpe 
2187e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
2188