xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision c6c11ad3)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
445bd48c18SJason Gunthorpe #include "uverbs.h"
456c80b41aSLeon Romanovsky 
46fb910690SErez Alfasi typedef int (*res_fill_func_t)(struct sk_buff*, bool,
47fb910690SErez Alfasi 			       struct rdma_restrack_entry*, uint32_t);
48fb910690SErez Alfasi 
49696de2e9SDoug Ledford /*
50696de2e9SDoug Ledford  * Sort array elements by the netlink attribute name
51696de2e9SDoug Ledford  */
52b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
53696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
54696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
55696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
5634d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
57696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
5834d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
59f8fc8cd9SYamin Friedman 	[RDMA_NLDEV_ATTR_DEV_DIM]               = { .type = NLA_U8 },
60b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
61b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
6234d65cd8SDoug Ledford 					.len = IB_DEVICE_NAME_MAX },
63696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
64696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
6534d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
66696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
67696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
68696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
69696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
7034d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
71696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
72696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
73696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
74696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
758621a7e3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
7634d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
7780a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
78696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
7934d65cd8SDoug Ledford 					.len = IFNAMSIZ },
8034840feaSLeon Romanovsky 	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
815b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
825b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
835b2cc79dSLeon Romanovsky 					.len = IFNAMSIZ },
84696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
85696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
86696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
87696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
88696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
89517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
90696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
91696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
92696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
93696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
94696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
9512ce208fSNeta Ostrovsky 	[RDMA_NLDEV_ATTR_RES_CTX]		= { .type = NLA_NESTED },
96c3d02788SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
9712ce208fSNeta Ostrovsky 	[RDMA_NLDEV_ATTR_RES_CTX_ENTRY]		= { .type = NLA_NESTED },
98696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
99696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
100696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
101696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
10234d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
103696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
104696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
105696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
106696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
107696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
108696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
109696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
110696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
111696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
112696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
113696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
114696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
115696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
116696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
117696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
118696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
11965959522SMaor Gottlieb 	[RDMA_NLDEV_ATTR_RES_RAW]		= { .type = NLA_BINARY },
120696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
121696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
122696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
123696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
124696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
125696de2e9SDoug Ledford 			.len = sizeof(struct __kernel_sockaddr_storage) },
126696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
127696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
128696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
129696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
130696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
13134d65cd8SDoug Ledford 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
132696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
133696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
134696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
135391c6bd5SNeta Ostrovsky 	[RDMA_NLDEV_ATTR_RES_SRQ]		= { .type = NLA_NESTED },
136391c6bd5SNeta Ostrovsky 	[RDMA_NLDEV_ATTR_RES_SRQN]		= { .type = NLA_U32 },
137391c6bd5SNeta Ostrovsky 	[RDMA_NLDEV_ATTR_RES_SRQ_ENTRY]		= { .type = NLA_NESTED },
138*c6c11ad3SNeta Ostrovsky 	[RDMA_NLDEV_ATTR_MIN_RANGE]		= { .type = NLA_U32 },
139*c6c11ad3SNeta Ostrovsky 	[RDMA_NLDEV_ATTR_MAX_RANGE]		= { .type = NLA_U32 },
140696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
141696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
142b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
143b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
144b47ae6f8SMark Zhang 	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
145c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
146c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
147c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]       = { .type = NLA_U32 },
148c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]       = { .type = NLA_NESTED },
149c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]  = { .type = NLA_NESTED },
150c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
151c4ffee7cSMark Zhang 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
152696de2e9SDoug Ledford 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
1538f71bb00SJason Gunthorpe 	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
154696de2e9SDoug Ledford 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
155696de2e9SDoug Ledford 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
156b4c598a6SLeon Romanovsky };
157b4c598a6SLeon Romanovsky 
15873937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
15973937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
16073937e8aSSteve Wise {
16173937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
16273937e8aSSteve Wise 		return -EMSGSIZE;
16373937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
16473937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
16573937e8aSSteve Wise 		return -EMSGSIZE;
16673937e8aSSteve Wise 
16773937e8aSSteve Wise 	return 0;
16873937e8aSSteve Wise }
16973937e8aSSteve Wise 
17073937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
17173937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
17273937e8aSSteve Wise 				   u32 value)
17373937e8aSSteve Wise {
17473937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
17573937e8aSSteve Wise 		return -EMSGSIZE;
17673937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
17773937e8aSSteve Wise 		return -EMSGSIZE;
17873937e8aSSteve Wise 
17973937e8aSSteve Wise 	return 0;
18073937e8aSSteve Wise }
18173937e8aSSteve Wise 
18273937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
18373937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
18473937e8aSSteve Wise 				   u64 value)
18573937e8aSSteve Wise {
18673937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
18773937e8aSSteve Wise 		return -EMSGSIZE;
18873937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
18973937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
19073937e8aSSteve Wise 		return -EMSGSIZE;
19173937e8aSSteve Wise 
19273937e8aSSteve Wise 	return 0;
19373937e8aSSteve Wise }
19473937e8aSSteve Wise 
195e1b95ae0SErez Alfasi int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
196e1b95ae0SErez Alfasi 			      const char *str)
197e1b95ae0SErez Alfasi {
198e1b95ae0SErez Alfasi 	if (put_driver_name_print_type(msg, name,
199e1b95ae0SErez Alfasi 				       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
200e1b95ae0SErez Alfasi 		return -EMSGSIZE;
201e1b95ae0SErez Alfasi 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
202e1b95ae0SErez Alfasi 		return -EMSGSIZE;
203e1b95ae0SErez Alfasi 
204e1b95ae0SErez Alfasi 	return 0;
205e1b95ae0SErez Alfasi }
206e1b95ae0SErez Alfasi EXPORT_SYMBOL(rdma_nl_put_driver_string);
207e1b95ae0SErez Alfasi 
20873937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
20973937e8aSSteve Wise {
21073937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
21173937e8aSSteve Wise 				       value);
21273937e8aSSteve Wise }
21373937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32);
21473937e8aSSteve Wise 
21573937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
21673937e8aSSteve Wise 			       u32 value)
21773937e8aSSteve Wise {
21873937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
21973937e8aSSteve Wise 				       value);
22073937e8aSSteve Wise }
22173937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
22273937e8aSSteve Wise 
22373937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
22473937e8aSSteve Wise {
22573937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
22673937e8aSSteve Wise 				       value);
22773937e8aSSteve Wise }
22873937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64);
22973937e8aSSteve Wise 
23073937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
23173937e8aSSteve Wise {
23273937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
23373937e8aSSteve Wise 				       value);
23473937e8aSSteve Wise }
23573937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
23673937e8aSSteve Wise 
237c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
238b4c598a6SLeon Romanovsky {
239b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
240b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
241896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
242896de009SJason Gunthorpe 			   dev_name(&device->dev)))
243b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
244c2409810SLeon Romanovsky 
245c2409810SLeon Romanovsky 	return 0;
246c2409810SLeon Romanovsky }
247c2409810SLeon Romanovsky 
/*
 * Fill a device-level GET reply: handle, number of ports, capability
 * flags, FW version (if any), GUIDs, node type, CQ-DIM setting and the
 * protocol string derived from the first port.
 *
 * Returns 0 on success, -EMSGSIZE when the skb runs out of room.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u32 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* PORT_INDEX here carries the number of ports, not a port id. */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	/* device_cap_flags is sent as a u64; catch any future type change. */
	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * Link type is determined on first port and mlx4 device
	 * which can potentially have two different link type for the same
	 * IB device is considered as better to be avoided in the future,
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}
303b4c598a6SLeon Romanovsky 
/*
 * Fill a port-level GET reply for @port of @device: device handle, port
 * index, IB-specific attributes (cap flags, subnet prefix, LIDs, LMC),
 * port/phys state and - when the associated netdev belongs to @net -
 * its ifindex and name.
 *
 * Returns 0 on success, a negative ib_query_port() error, or -EMSGSIZE.
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	/* LID/LMC/subnet attributes only exist on InfiniBand links. */
	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap flag words are packed into one u64. */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/* Takes a reference on the netdev; released below. */
	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

	/* Falls through here with ret == 0 when no netdev info is sent. */
out:
	if (netdev)
		dev_put(netdev);
	return ret;
}
3617d02f605SLeon Romanovsky 
362bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
363bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
364bf3c5a93SLeon Romanovsky {
365bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
366bf3c5a93SLeon Romanovsky 
367ae0be8deSMichal Kubecek 	entry_attr = nla_nest_start_noflag(msg,
368ae0be8deSMichal Kubecek 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
369bf3c5a93SLeon Romanovsky 	if (!entry_attr)
370bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
371bf3c5a93SLeon Romanovsky 
372bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
373bf3c5a93SLeon Romanovsky 		goto err;
37425a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
37525a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
376bf3c5a93SLeon Romanovsky 		goto err;
377bf3c5a93SLeon Romanovsky 
378bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
379bf3c5a93SLeon Romanovsky 	return 0;
380bf3c5a93SLeon Romanovsky 
381bf3c5a93SLeon Romanovsky err:
382bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
383bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
384bf3c5a93SLeon Romanovsky }
385bf3c5a93SLeon Romanovsky 
386bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
387bf3c5a93SLeon Romanovsky {
388bf3c5a93SLeon Romanovsky 	static const char * const names[RDMA_RESTRACK_MAX] = {
389bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_PD] = "pd",
390bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_CQ] = "cq",
391bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_QP] = "qp",
39200313983SSteve Wise 		[RDMA_RESTRACK_CM_ID] = "cm_id",
393fccec5b8SSteve Wise 		[RDMA_RESTRACK_MR] = "mr",
394ffd321e4SLeon Romanovsky 		[RDMA_RESTRACK_CTX] = "ctx",
395391c6bd5SNeta Ostrovsky 		[RDMA_RESTRACK_SRQ] = "srq",
396bf3c5a93SLeon Romanovsky 	};
397bf3c5a93SLeon Romanovsky 
398bf3c5a93SLeon Romanovsky 	struct nlattr *table_attr;
399bf3c5a93SLeon Romanovsky 	int ret, i, curr;
400bf3c5a93SLeon Romanovsky 
401bf3c5a93SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
402bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
403bf3c5a93SLeon Romanovsky 
404ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
405bf3c5a93SLeon Romanovsky 	if (!table_attr)
406bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
407bf3c5a93SLeon Romanovsky 
408bf3c5a93SLeon Romanovsky 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
409bf3c5a93SLeon Romanovsky 		if (!names[i])
410bf3c5a93SLeon Romanovsky 			continue;
41160c78668SLeon Romanovsky 		curr = rdma_restrack_count(device, i);
412bf3c5a93SLeon Romanovsky 		ret = fill_res_info_entry(msg, names[i], curr);
413bf3c5a93SLeon Romanovsky 		if (ret)
414bf3c5a93SLeon Romanovsky 			goto err;
415bf3c5a93SLeon Romanovsky 	}
416bf3c5a93SLeon Romanovsky 
417bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, table_attr);
418bf3c5a93SLeon Romanovsky 	return 0;
419bf3c5a93SLeon Romanovsky 
420bf3c5a93SLeon Romanovsky err:
421bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, table_attr);
422bf3c5a93SLeon Romanovsky 	return ret;
423bf3c5a93SLeon Romanovsky }
424bf3c5a93SLeon Romanovsky 
42500313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
42600313983SSteve Wise 			     struct rdma_restrack_entry *res)
42700313983SSteve Wise {
428ac71ffcfSLeon Romanovsky 	int err = 0;
429ac71ffcfSLeon Romanovsky 
43000313983SSteve Wise 	/*
43100313983SSteve Wise 	 * For user resources, user is should read /proc/PID/comm to get the
43200313983SSteve Wise 	 * name of the task file.
43300313983SSteve Wise 	 */
43400313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
435ac71ffcfSLeon Romanovsky 		err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
436ac71ffcfSLeon Romanovsky 				     res->kern_name);
43700313983SSteve Wise 	} else {
438ac71ffcfSLeon Romanovsky 		pid_t pid;
439ac71ffcfSLeon Romanovsky 
440ac71ffcfSLeon Romanovsky 		pid = task_pid_vnr(res->task);
441ac71ffcfSLeon Romanovsky 		/*
442ac71ffcfSLeon Romanovsky 		 * Task is dead and in zombie state.
443ac71ffcfSLeon Romanovsky 		 * There is no need to print PID anymore.
444ac71ffcfSLeon Romanovsky 		 */
445ac71ffcfSLeon Romanovsky 		if (pid)
446ac71ffcfSLeon Romanovsky 			/*
447ac71ffcfSLeon Romanovsky 			 * This part is racy, task can be killed and PID will
448ac71ffcfSLeon Romanovsky 			 * be zero right here but it is ok, next query won't
449ac71ffcfSLeon Romanovsky 			 * return PID. We don't promise real-time reflection
450ac71ffcfSLeon Romanovsky 			 * of SW objects.
451ac71ffcfSLeon Romanovsky 			 */
452ac71ffcfSLeon Romanovsky 			err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
45300313983SSteve Wise 	}
454ac71ffcfSLeon Romanovsky 
455ac71ffcfSLeon Romanovsky 	return err ? -EMSGSIZE : 0;
45600313983SSteve Wise }
45700313983SSteve Wise 
45865959522SMaor Gottlieb static int fill_res_qp_entry_query(struct sk_buff *msg,
45965959522SMaor Gottlieb 				   struct rdma_restrack_entry *res,
46065959522SMaor Gottlieb 				   struct ib_device *dev,
46165959522SMaor Gottlieb 				   struct ib_qp *qp)
462b5fa635aSLeon Romanovsky {
463b5fa635aSLeon Romanovsky 	struct ib_qp_init_attr qp_init_attr;
464b5fa635aSLeon Romanovsky 	struct ib_qp_attr qp_attr;
465b5fa635aSLeon Romanovsky 	int ret;
466b5fa635aSLeon Romanovsky 
467b5fa635aSLeon Romanovsky 	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
468b5fa635aSLeon Romanovsky 	if (ret)
469b5fa635aSLeon Romanovsky 		return ret;
470b5fa635aSLeon Romanovsky 
471b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
472b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
473b5fa635aSLeon Romanovsky 				qp_attr.dest_qp_num))
474b5fa635aSLeon Romanovsky 			goto err;
475b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
476b5fa635aSLeon Romanovsky 				qp_attr.rq_psn))
477b5fa635aSLeon Romanovsky 			goto err;
478b5fa635aSLeon Romanovsky 	}
479b5fa635aSLeon Romanovsky 
480b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
481b5fa635aSLeon Romanovsky 		goto err;
482b5fa635aSLeon Romanovsky 
483b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
484b5fa635aSLeon Romanovsky 	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
485b5fa635aSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
486b5fa635aSLeon Romanovsky 			       qp_attr.path_mig_state))
487b5fa635aSLeon Romanovsky 			goto err;
488b5fa635aSLeon Romanovsky 	}
489b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
490b5fa635aSLeon Romanovsky 		goto err;
491b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
492b5fa635aSLeon Romanovsky 		goto err;
493b5fa635aSLeon Romanovsky 
4945cc34116SMaor Gottlieb 	if (dev->ops.fill_res_qp_entry)
4955cc34116SMaor Gottlieb 		return dev->ops.fill_res_qp_entry(msg, qp);
49600313983SSteve Wise 	return 0;
49700313983SSteve Wise 
498c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
49900313983SSteve Wise }
50000313983SSteve Wise 
50165959522SMaor Gottlieb static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
50265959522SMaor Gottlieb 			     struct rdma_restrack_entry *res, uint32_t port)
50365959522SMaor Gottlieb {
50465959522SMaor Gottlieb 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
50565959522SMaor Gottlieb 	struct ib_device *dev = qp->device;
50665959522SMaor Gottlieb 	int ret;
50765959522SMaor Gottlieb 
50865959522SMaor Gottlieb 	if (port && port != qp->port)
50965959522SMaor Gottlieb 		return -EAGAIN;
51065959522SMaor Gottlieb 
51165959522SMaor Gottlieb 	/* In create_qp() port is not set yet */
51265959522SMaor Gottlieb 	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
51365959522SMaor Gottlieb 		return -EINVAL;
51465959522SMaor Gottlieb 
51565959522SMaor Gottlieb 	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
51665959522SMaor Gottlieb 	if (ret)
51765959522SMaor Gottlieb 		return -EMSGSIZE;
51865959522SMaor Gottlieb 
51965959522SMaor Gottlieb 	if (!rdma_is_kernel_res(res) &&
52065959522SMaor Gottlieb 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
52165959522SMaor Gottlieb 		return -EMSGSIZE;
52265959522SMaor Gottlieb 
52365959522SMaor Gottlieb 	ret = fill_res_name_pid(msg, res);
52465959522SMaor Gottlieb 	if (ret)
52565959522SMaor Gottlieb 		return -EMSGSIZE;
52665959522SMaor Gottlieb 
52765959522SMaor Gottlieb 	return fill_res_qp_entry_query(msg, res, dev, qp);
52865959522SMaor Gottlieb }
52965959522SMaor Gottlieb 
53065959522SMaor Gottlieb static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
53165959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
53265959522SMaor Gottlieb {
53365959522SMaor Gottlieb 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
53465959522SMaor Gottlieb 	struct ib_device *dev = qp->device;
53565959522SMaor Gottlieb 
53665959522SMaor Gottlieb 	if (port && port != qp->port)
53765959522SMaor Gottlieb 		return -EAGAIN;
53865959522SMaor Gottlieb 	if (!dev->ops.fill_res_qp_entry_raw)
53965959522SMaor Gottlieb 		return -EINVAL;
54065959522SMaor Gottlieb 	return dev->ops.fill_res_qp_entry_raw(msg, qp);
54165959522SMaor Gottlieb }
54265959522SMaor Gottlieb 
543659067b0SLeon Romanovsky static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
54400313983SSteve Wise 				struct rdma_restrack_entry *res, uint32_t port)
54500313983SSteve Wise {
54600313983SSteve Wise 	struct rdma_id_private *id_priv =
54700313983SSteve Wise 				container_of(res, struct rdma_id_private, res);
54802da3750SLeon Romanovsky 	struct ib_device *dev = id_priv->id.device;
54900313983SSteve Wise 	struct rdma_cm_id *cm_id = &id_priv->id;
55000313983SSteve Wise 
55100313983SSteve Wise 	if (port && port != cm_id->port_num)
55200313983SSteve Wise 		return 0;
55300313983SSteve Wise 
55400313983SSteve Wise 	if (cm_id->port_num &&
55500313983SSteve Wise 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
55600313983SSteve Wise 		goto err;
55700313983SSteve Wise 
55800313983SSteve Wise 	if (id_priv->qp_num) {
55900313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
56000313983SSteve Wise 			goto err;
56100313983SSteve Wise 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
562b5fa635aSLeon Romanovsky 			goto err;
563b5fa635aSLeon Romanovsky 	}
564b5fa635aSLeon Romanovsky 
56500313983SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
56600313983SSteve Wise 		goto err;
56700313983SSteve Wise 
56800313983SSteve Wise 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
56900313983SSteve Wise 		goto err;
57000313983SSteve Wise 
57100313983SSteve Wise 	if (cm_id->route.addr.src_addr.ss_family &&
57200313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
57300313983SSteve Wise 		    sizeof(cm_id->route.addr.src_addr),
57400313983SSteve Wise 		    &cm_id->route.addr.src_addr))
57500313983SSteve Wise 		goto err;
57600313983SSteve Wise 	if (cm_id->route.addr.dst_addr.ss_family &&
57700313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
57800313983SSteve Wise 		    sizeof(cm_id->route.addr.dst_addr),
57900313983SSteve Wise 		    &cm_id->route.addr.dst_addr))
58000313983SSteve Wise 		goto err;
58100313983SSteve Wise 
582517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
583517b773eSLeon Romanovsky 		goto err;
584517b773eSLeon Romanovsky 
58500313983SSteve Wise 	if (fill_res_name_pid(msg, res))
58600313983SSteve Wise 		goto err;
58700313983SSteve Wise 
588211cd945SMaor Gottlieb 	if (dev->ops.fill_res_cm_id_entry)
589211cd945SMaor Gottlieb 		return dev->ops.fill_res_cm_id_entry(msg, cm_id);
590b5fa635aSLeon Romanovsky 	return 0;
591b5fa635aSLeon Romanovsky 
592c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE;
593b5fa635aSLeon Romanovsky }
594b5fa635aSLeon Romanovsky 
595659067b0SLeon Romanovsky static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
596a34fc089SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
597a34fc089SSteve Wise {
598a34fc089SSteve Wise 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
59902da3750SLeon Romanovsky 	struct ib_device *dev = cq->device;
600a34fc089SSteve Wise 
601a34fc089SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
60265959522SMaor Gottlieb 		return -EMSGSIZE;
603a34fc089SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
60425a0ad85SSteve Wise 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
60565959522SMaor Gottlieb 		return -EMSGSIZE;
606a34fc089SSteve Wise 
607a34fc089SSteve Wise 	/* Poll context is only valid for kernel CQs */
608a34fc089SSteve Wise 	if (rdma_is_kernel_res(res) &&
609a34fc089SSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
61065959522SMaor Gottlieb 		return -EMSGSIZE;
611a34fc089SSteve Wise 
612f8fc8cd9SYamin Friedman 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
61365959522SMaor Gottlieb 		return -EMSGSIZE;
614f8fc8cd9SYamin Friedman 
615517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
61665959522SMaor Gottlieb 		return -EMSGSIZE;
617c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
618c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
6195bd48c18SJason Gunthorpe 			cq->uobject->uevent.uobject.context->res.id))
62065959522SMaor Gottlieb 		return -EMSGSIZE;
621517b773eSLeon Romanovsky 
622a34fc089SSteve Wise 	if (fill_res_name_pid(msg, res))
62365959522SMaor Gottlieb 		return -EMSGSIZE;
624a34fc089SSteve Wise 
62565959522SMaor Gottlieb 	return (dev->ops.fill_res_cq_entry) ?
62665959522SMaor Gottlieb 		dev->ops.fill_res_cq_entry(msg, cq) : 0;
62765959522SMaor Gottlieb }
628a34fc089SSteve Wise 
62965959522SMaor Gottlieb static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
63065959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
63165959522SMaor Gottlieb {
63265959522SMaor Gottlieb 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
63365959522SMaor Gottlieb 	struct ib_device *dev = cq->device;
63465959522SMaor Gottlieb 
63565959522SMaor Gottlieb 	if (!dev->ops.fill_res_cq_entry_raw)
63665959522SMaor Gottlieb 		return -EINVAL;
63765959522SMaor Gottlieb 	return dev->ops.fill_res_cq_entry_raw(msg, cq);
638a34fc089SSteve Wise }
639a34fc089SSteve Wise 
640659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
641fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
642fccec5b8SSteve Wise {
643fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
64402da3750SLeon Romanovsky 	struct ib_device *dev = mr->pd->device;
645fccec5b8SSteve Wise 
646659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
647fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
64865959522SMaor Gottlieb 			return -EMSGSIZE;
649fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
65065959522SMaor Gottlieb 			return -EMSGSIZE;
651fccec5b8SSteve Wise 	}
652fccec5b8SSteve Wise 
65325a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
65425a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
65565959522SMaor Gottlieb 		return -EMSGSIZE;
656fccec5b8SSteve Wise 
657517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
65865959522SMaor Gottlieb 		return -EMSGSIZE;
659517b773eSLeon Romanovsky 
660c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
661c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
66265959522SMaor Gottlieb 		return -EMSGSIZE;
663c3d02788SLeon Romanovsky 
664fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
66565959522SMaor Gottlieb 		return -EMSGSIZE;
666fccec5b8SSteve Wise 
66765959522SMaor Gottlieb 	return (dev->ops.fill_res_mr_entry) ?
66865959522SMaor Gottlieb 		       dev->ops.fill_res_mr_entry(msg, mr) :
66965959522SMaor Gottlieb 		       0;
67065959522SMaor Gottlieb }
671fccec5b8SSteve Wise 
67265959522SMaor Gottlieb static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
67365959522SMaor Gottlieb 				 struct rdma_restrack_entry *res, uint32_t port)
67465959522SMaor Gottlieb {
67565959522SMaor Gottlieb 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
67665959522SMaor Gottlieb 	struct ib_device *dev = mr->pd->device;
67765959522SMaor Gottlieb 
67865959522SMaor Gottlieb 	if (!dev->ops.fill_res_mr_entry_raw)
67965959522SMaor Gottlieb 		return -EINVAL;
68065959522SMaor Gottlieb 	return dev->ops.fill_res_mr_entry_raw(msg, mr);
681fccec5b8SSteve Wise }
682fccec5b8SSteve Wise 
683659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
68429cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
68529cf1351SSteve Wise {
68629cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
68729cf1351SSteve Wise 
688659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
68929cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
69029cf1351SSteve Wise 				pd->local_dma_lkey))
69129cf1351SSteve Wise 			goto err;
69229cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
69329cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
69429cf1351SSteve Wise 				pd->unsafe_global_rkey))
69529cf1351SSteve Wise 			goto err;
69629cf1351SSteve Wise 	}
69729cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
69825a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
69929cf1351SSteve Wise 		goto err;
70029cf1351SSteve Wise 
701517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
702517b773eSLeon Romanovsky 		goto err;
703517b773eSLeon Romanovsky 
704c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
705c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
706c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
707c3d02788SLeon Romanovsky 		goto err;
708c3d02788SLeon Romanovsky 
70924fd6d6fSMaor Gottlieb 	return fill_res_name_pid(msg, res);
71029cf1351SSteve Wise 
711c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
71229cf1351SSteve Wise }
71329cf1351SSteve Wise 
71412ce208fSNeta Ostrovsky static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
71512ce208fSNeta Ostrovsky 			      struct rdma_restrack_entry *res, uint32_t port)
71612ce208fSNeta Ostrovsky {
71712ce208fSNeta Ostrovsky 	struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
71812ce208fSNeta Ostrovsky 
71912ce208fSNeta Ostrovsky 	if (rdma_is_kernel_res(res))
72012ce208fSNeta Ostrovsky 		return 0;
72112ce208fSNeta Ostrovsky 
72212ce208fSNeta Ostrovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
72312ce208fSNeta Ostrovsky 		return -EMSGSIZE;
72412ce208fSNeta Ostrovsky 
72512ce208fSNeta Ostrovsky 	return fill_res_name_pid(msg, res);
72612ce208fSNeta Ostrovsky }
72712ce208fSNeta Ostrovsky 
728*c6c11ad3SNeta Ostrovsky static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
729*c6c11ad3SNeta Ostrovsky 				   uint32_t max_range)
730*c6c11ad3SNeta Ostrovsky {
731*c6c11ad3SNeta Ostrovsky 	struct nlattr *entry_attr;
732*c6c11ad3SNeta Ostrovsky 
733*c6c11ad3SNeta Ostrovsky 	if (!min_range)
734*c6c11ad3SNeta Ostrovsky 		return 0;
735*c6c11ad3SNeta Ostrovsky 
736*c6c11ad3SNeta Ostrovsky 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
737*c6c11ad3SNeta Ostrovsky 	if (!entry_attr)
738*c6c11ad3SNeta Ostrovsky 		return -EMSGSIZE;
739*c6c11ad3SNeta Ostrovsky 
740*c6c11ad3SNeta Ostrovsky 	if (min_range == max_range) {
741*c6c11ad3SNeta Ostrovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
742*c6c11ad3SNeta Ostrovsky 			goto err;
743*c6c11ad3SNeta Ostrovsky 	} else {
744*c6c11ad3SNeta Ostrovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
745*c6c11ad3SNeta Ostrovsky 			goto err;
746*c6c11ad3SNeta Ostrovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
747*c6c11ad3SNeta Ostrovsky 			goto err;
748*c6c11ad3SNeta Ostrovsky 	}
749*c6c11ad3SNeta Ostrovsky 	nla_nest_end(msg, entry_attr);
750*c6c11ad3SNeta Ostrovsky 	return 0;
751*c6c11ad3SNeta Ostrovsky 
752*c6c11ad3SNeta Ostrovsky err:
753*c6c11ad3SNeta Ostrovsky 	nla_nest_cancel(msg, entry_attr);
754*c6c11ad3SNeta Ostrovsky 	return -EMSGSIZE;
755*c6c11ad3SNeta Ostrovsky }
756*c6c11ad3SNeta Ostrovsky 
/*
 * Emit, as a nested RDMA_NLDEV_ATTR_RES_QP table, the set of QP numbers
 * that are attached to @srq.  Consecutive QP numbers are compressed into
 * ranges via fill_res_range_qp_entry().
 *
 * Walks the device's QP restrack xarray under xa_lock; each visited
 * entry is pinned with rdma_restrack_get() and released before moving
 * on.  The walk assumes xarray iteration yields ascending qp_num
 * (enforced by the qp_num < prev check below); a violation aborts the
 * dump with -EMSGSIZE.
 */
static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
{
	/* min_range/prev track the current run of consecutive QP numbers */
	uint32_t min_range = 0, prev = 0;
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct nlattr *table_attr;
	struct ib_qp *qp = NULL;
	unsigned long id = 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
	if (!table_attr)
		return -EMSGSIZE;

	rt = &srq->device->res[RDMA_RESTRACK_QP];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		/* Entry is being torn down; skip it */
		if (!rdma_restrack_get(res))
			continue;

		qp = container_of(res, struct ib_qp, res);
		/* Only QPs attached to this SRQ are of interest */
		if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
			rdma_restrack_put(res);
			continue;
		}

		if (qp->qp_num < prev)
			/* qp_num should be ascending */
			goto err_loop;

		if (min_range == 0) {
			/* First matching QP starts the first range */
			min_range = qp->qp_num;
		} else if (qp->qp_num > (prev + 1)) {
			/* Gap found: flush the finished range, start a new one */
			if (fill_res_range_qp_entry(msg, min_range, prev))
				goto err_loop;

			min_range = qp->qp_num;
		}
		prev = qp->qp_num;
		rdma_restrack_put(res);
	}

	xa_unlock(&rt->xa);

	/* Flush the final (possibly empty) range */
	if (fill_res_range_qp_entry(msg, min_range, prev))
		goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err_loop:
	/* Drop the reference taken on the entry that caused the failure */
	rdma_restrack_put(res);
	xa_unlock(&rt->xa);
err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
813*c6c11ad3SNeta Ostrovsky 
814391c6bd5SNeta Ostrovsky static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
815391c6bd5SNeta Ostrovsky 			      struct rdma_restrack_entry *res, uint32_t port)
816391c6bd5SNeta Ostrovsky {
817391c6bd5SNeta Ostrovsky 	struct ib_srq *srq = container_of(res, struct ib_srq, res);
818391c6bd5SNeta Ostrovsky 
819391c6bd5SNeta Ostrovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
820391c6bd5SNeta Ostrovsky 		goto err;
821391c6bd5SNeta Ostrovsky 
822391c6bd5SNeta Ostrovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
823391c6bd5SNeta Ostrovsky 		goto err;
824391c6bd5SNeta Ostrovsky 
825391c6bd5SNeta Ostrovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
826391c6bd5SNeta Ostrovsky 		goto err;
827391c6bd5SNeta Ostrovsky 
828391c6bd5SNeta Ostrovsky 	if (ib_srq_has_cq(srq->srq_type)) {
829391c6bd5SNeta Ostrovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
830391c6bd5SNeta Ostrovsky 				srq->ext.cq->res.id))
831391c6bd5SNeta Ostrovsky 			goto err;
832391c6bd5SNeta Ostrovsky 	}
833391c6bd5SNeta Ostrovsky 
834*c6c11ad3SNeta Ostrovsky 	if (fill_res_srq_qps(msg, srq))
835*c6c11ad3SNeta Ostrovsky 		goto err;
836*c6c11ad3SNeta Ostrovsky 
837391c6bd5SNeta Ostrovsky 	return fill_res_name_pid(msg, res);
838391c6bd5SNeta Ostrovsky 
839391c6bd5SNeta Ostrovsky err:
840391c6bd5SNeta Ostrovsky 	return -EMSGSIZE;
841391c6bd5SNeta Ostrovsky }
842391c6bd5SNeta Ostrovsky 
843c4ffee7cSMark Zhang static int fill_stat_counter_mode(struct sk_buff *msg,
844c4ffee7cSMark Zhang 				  struct rdma_counter *counter)
845c4ffee7cSMark Zhang {
846c4ffee7cSMark Zhang 	struct rdma_counter_mode *m = &counter->mode;
847c4ffee7cSMark Zhang 
848c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
849c4ffee7cSMark Zhang 		return -EMSGSIZE;
850c4ffee7cSMark Zhang 
8517c97f3adSMark Zhang 	if (m->mode == RDMA_COUNTER_MODE_AUTO) {
852c4ffee7cSMark Zhang 		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
853c4ffee7cSMark Zhang 		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
854c4ffee7cSMark Zhang 			return -EMSGSIZE;
855c4ffee7cSMark Zhang 
8567c97f3adSMark Zhang 		if ((m->mask & RDMA_COUNTER_MASK_PID) &&
8577c97f3adSMark Zhang 		    fill_res_name_pid(msg, &counter->res))
8587c97f3adSMark Zhang 			return -EMSGSIZE;
8597c97f3adSMark Zhang 	}
8607c97f3adSMark Zhang 
861c4ffee7cSMark Zhang 	return 0;
862c4ffee7cSMark Zhang }
863c4ffee7cSMark Zhang 
864c4ffee7cSMark Zhang static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
865c4ffee7cSMark Zhang {
866c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
867c4ffee7cSMark Zhang 
868c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
869c4ffee7cSMark Zhang 	if (!entry_attr)
870c4ffee7cSMark Zhang 		return -EMSGSIZE;
871c4ffee7cSMark Zhang 
872c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
873c4ffee7cSMark Zhang 		goto err;
874c4ffee7cSMark Zhang 
875c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
876c4ffee7cSMark Zhang 	return 0;
877c4ffee7cSMark Zhang 
878c4ffee7cSMark Zhang err:
879c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
880c4ffee7cSMark Zhang 	return -EMSGSIZE;
881c4ffee7cSMark Zhang }
882c4ffee7cSMark Zhang 
883c4ffee7cSMark Zhang static int fill_stat_counter_qps(struct sk_buff *msg,
884c4ffee7cSMark Zhang 				 struct rdma_counter *counter)
885c4ffee7cSMark Zhang {
886c4ffee7cSMark Zhang 	struct rdma_restrack_entry *res;
887c4ffee7cSMark Zhang 	struct rdma_restrack_root *rt;
888c4ffee7cSMark Zhang 	struct nlattr *table_attr;
889c4ffee7cSMark Zhang 	struct ib_qp *qp = NULL;
890c4ffee7cSMark Zhang 	unsigned long id = 0;
891c4ffee7cSMark Zhang 	int ret = 0;
892c4ffee7cSMark Zhang 
893c4ffee7cSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
894c4ffee7cSMark Zhang 
895c4ffee7cSMark Zhang 	rt = &counter->device->res[RDMA_RESTRACK_QP];
896c4ffee7cSMark Zhang 	xa_lock(&rt->xa);
897c4ffee7cSMark Zhang 	xa_for_each(&rt->xa, id, res) {
898c4ffee7cSMark Zhang 		qp = container_of(res, struct ib_qp, res);
899c4ffee7cSMark Zhang 		if (!qp->counter || (qp->counter->id != counter->id))
900c4ffee7cSMark Zhang 			continue;
901c4ffee7cSMark Zhang 
902c4ffee7cSMark Zhang 		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
903c4ffee7cSMark Zhang 		if (ret)
904c4ffee7cSMark Zhang 			goto err;
905c4ffee7cSMark Zhang 	}
906c4ffee7cSMark Zhang 
907c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
908c4ffee7cSMark Zhang 	nla_nest_end(msg, table_attr);
909c4ffee7cSMark Zhang 	return 0;
910c4ffee7cSMark Zhang 
911c4ffee7cSMark Zhang err:
912c4ffee7cSMark Zhang 	xa_unlock(&rt->xa);
913c4ffee7cSMark Zhang 	nla_nest_cancel(msg, table_attr);
914c4ffee7cSMark Zhang 	return ret;
915c4ffee7cSMark Zhang }
916c4ffee7cSMark Zhang 
9174061ff7aSErez Alfasi int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
9184061ff7aSErez Alfasi 				 u64 value)
919c4ffee7cSMark Zhang {
920c4ffee7cSMark Zhang 	struct nlattr *entry_attr;
921c4ffee7cSMark Zhang 
922c4ffee7cSMark Zhang 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
923c4ffee7cSMark Zhang 	if (!entry_attr)
924c4ffee7cSMark Zhang 		return -EMSGSIZE;
925c4ffee7cSMark Zhang 
926c4ffee7cSMark Zhang 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
927c4ffee7cSMark Zhang 			   name))
928c4ffee7cSMark Zhang 		goto err;
929c4ffee7cSMark Zhang 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
930c4ffee7cSMark Zhang 			      value, RDMA_NLDEV_ATTR_PAD))
931c4ffee7cSMark Zhang 		goto err;
932c4ffee7cSMark Zhang 
933c4ffee7cSMark Zhang 	nla_nest_end(msg, entry_attr);
934c4ffee7cSMark Zhang 	return 0;
935c4ffee7cSMark Zhang 
936c4ffee7cSMark Zhang err:
937c4ffee7cSMark Zhang 	nla_nest_cancel(msg, entry_attr);
938c4ffee7cSMark Zhang 	return -EMSGSIZE;
939c4ffee7cSMark Zhang }
9404061ff7aSErez Alfasi EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
9414061ff7aSErez Alfasi 
9424061ff7aSErez Alfasi static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
9434061ff7aSErez Alfasi 			      struct rdma_restrack_entry *res, uint32_t port)
9444061ff7aSErez Alfasi {
9454061ff7aSErez Alfasi 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
9464061ff7aSErez Alfasi 	struct ib_device *dev = mr->pd->device;
9474061ff7aSErez Alfasi 
9484061ff7aSErez Alfasi 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
9494061ff7aSErez Alfasi 		goto err;
9504061ff7aSErez Alfasi 
951f4434529SMaor Gottlieb 	if (dev->ops.fill_stat_mr_entry)
952f4434529SMaor Gottlieb 		return dev->ops.fill_stat_mr_entry(msg, mr);
9534061ff7aSErez Alfasi 	return 0;
9544061ff7aSErez Alfasi 
9554061ff7aSErez Alfasi err:
9564061ff7aSErez Alfasi 	return -EMSGSIZE;
9574061ff7aSErez Alfasi }
958c4ffee7cSMark Zhang 
959c4ffee7cSMark Zhang static int fill_stat_counter_hwcounters(struct sk_buff *msg,
960c4ffee7cSMark Zhang 					struct rdma_counter *counter)
961c4ffee7cSMark Zhang {
962c4ffee7cSMark Zhang 	struct rdma_hw_stats *st = counter->stats;
963c4ffee7cSMark Zhang 	struct nlattr *table_attr;
964c4ffee7cSMark Zhang 	int i;
965c4ffee7cSMark Zhang 
966c4ffee7cSMark Zhang 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
967c4ffee7cSMark Zhang 	if (!table_attr)
968c4ffee7cSMark Zhang 		return -EMSGSIZE;
969c4ffee7cSMark Zhang 
970c4ffee7cSMark Zhang 	for (i = 0; i < st->num_counters; i++)
9714061ff7aSErez Alfasi 		if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
972c4ffee7cSMark Zhang 			goto err;
973c4ffee7cSMark Zhang 
974c4ffee7cSMark Zhang 	nla_nest_end(msg, table_attr);
975c4ffee7cSMark Zhang 	return 0;
976c4ffee7cSMark Zhang 
977c4ffee7cSMark Zhang err:
978c4ffee7cSMark Zhang 	nla_nest_cancel(msg, table_attr);
979c4ffee7cSMark Zhang 	return -EMSGSIZE;
980c4ffee7cSMark Zhang }
981c4ffee7cSMark Zhang 
982c4ffee7cSMark Zhang static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
983c4ffee7cSMark Zhang 				  struct rdma_restrack_entry *res,
984c4ffee7cSMark Zhang 				  uint32_t port)
985c4ffee7cSMark Zhang {
986c4ffee7cSMark Zhang 	struct rdma_counter *counter =
987c4ffee7cSMark Zhang 		container_of(res, struct rdma_counter, res);
988c4ffee7cSMark Zhang 
989c4ffee7cSMark Zhang 	if (port && port != counter->port)
990a15542bbSMark Zhang 		return -EAGAIN;
991c4ffee7cSMark Zhang 
992c4ffee7cSMark Zhang 	/* Dump it even query failed */
993c4ffee7cSMark Zhang 	rdma_counter_query_stats(counter);
994c4ffee7cSMark Zhang 
995c4ffee7cSMark Zhang 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
996c4ffee7cSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
997c4ffee7cSMark Zhang 	    fill_stat_counter_mode(msg, counter) ||
998c4ffee7cSMark Zhang 	    fill_stat_counter_qps(msg, counter) ||
999c4ffee7cSMark Zhang 	    fill_stat_counter_hwcounters(msg, counter))
1000c4ffee7cSMark Zhang 		return -EMSGSIZE;
1001c4ffee7cSMark Zhang 
1002c4ffee7cSMark Zhang 	return 0;
1003c4ffee7cSMark Zhang }
1004c4ffee7cSMark Zhang 
1005e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1006e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
1007e5c9469eSLeon Romanovsky {
1008e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1009e5c9469eSLeon Romanovsky 	struct ib_device *device;
1010e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
1011e5c9469eSLeon Romanovsky 	u32 index;
1012e5c9469eSLeon Romanovsky 	int err;
1013e5c9469eSLeon Romanovsky 
10148cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1015e5c9469eSLeon Romanovsky 				     nldev_policy, extack);
1016e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1017e5c9469eSLeon Romanovsky 		return -EINVAL;
1018e5c9469eSLeon Romanovsky 
1019e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1020e5c9469eSLeon Romanovsky 
102137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1022e5c9469eSLeon Romanovsky 	if (!device)
1023e5c9469eSLeon Romanovsky 		return -EINVAL;
1024e5c9469eSLeon Romanovsky 
1025e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1026f8978bd9SLeon Romanovsky 	if (!msg) {
1027f8978bd9SLeon Romanovsky 		err = -ENOMEM;
1028f8978bd9SLeon Romanovsky 		goto err;
1029f8978bd9SLeon Romanovsky 	}
1030e5c9469eSLeon Romanovsky 
1031e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1032e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1033e5c9469eSLeon Romanovsky 			0, 0);
1034e5c9469eSLeon Romanovsky 
1035e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
1036f8978bd9SLeon Romanovsky 	if (err)
1037f8978bd9SLeon Romanovsky 		goto err_free;
1038e5c9469eSLeon Romanovsky 
1039e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
1040e5c9469eSLeon Romanovsky 
104101b67117SParav Pandit 	ib_device_put(device);
10421d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1043f8978bd9SLeon Romanovsky 
1044f8978bd9SLeon Romanovsky err_free:
1045f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
1046f8978bd9SLeon Romanovsky err:
104701b67117SParav Pandit 	ib_device_put(device);
1048f8978bd9SLeon Romanovsky 	return err;
1049e5c9469eSLeon Romanovsky }
1050e5c9469eSLeon Romanovsky 
/*
 * RDMA_NLDEV_CMD_SET doit handler: apply exactly one of the supported
 * mutations to the device identified by RDMA_NLDEV_ATTR_DEV_INDEX:
 * rename (DEV_NAME), move to another net namespace (NET_NS_FD), or
 * toggle dynamic interrupt moderation (DEV_DIM).
 *
 * Reference handling: ib_device_get_by_index() takes a reference that
 * is normally dropped at the "done" label.  The NET_NS_FD branch jumps
 * to "put_done" instead — ib_device_set_netns_put() appears to consume
 * the reference itself (NOTE(review): inferred from the "_put" suffix
 * and the skipped ib_device_put; confirm against its definition).
 */
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		/* Reject an empty name rather than renaming to "" */
		if (strlen(name) == 0) {
			err = -EINVAL;
			goto done;
		}
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		err = ib_device_set_netns_put(skb, device, ns_fd);
		/* Skip ib_device_put: the callee takes over the reference */
		goto put_done;
	}

	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
		u8 use_dim;

		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
		err = ib_device_set_dim(device,  use_dim);
		goto done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
110305d940d3SLeon Romanovsky 
1104b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
1105b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
1106b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
1107b4c598a6SLeon Romanovsky 			     unsigned int idx)
1108b4c598a6SLeon Romanovsky {
1109b4c598a6SLeon Romanovsky 	int start = cb->args[0];
1110b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
1111b4c598a6SLeon Romanovsky 
1112b4c598a6SLeon Romanovsky 	if (idx < start)
1113b4c598a6SLeon Romanovsky 		return 0;
1114b4c598a6SLeon Romanovsky 
1115b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1116b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1117b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
1118b4c598a6SLeon Romanovsky 
1119b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
1120b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
1121b4c598a6SLeon Romanovsky 		goto out;
1122b4c598a6SLeon Romanovsky 	}
1123b4c598a6SLeon Romanovsky 
1124b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
1125b4c598a6SLeon Romanovsky 
1126b4c598a6SLeon Romanovsky 	idx++;
1127b4c598a6SLeon Romanovsky 
1128b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
1129b4c598a6SLeon Romanovsky 	return skb->len;
1130b4c598a6SLeon Romanovsky }
1131b4c598a6SLeon Romanovsky 
1132b4c598a6SLeon Romanovsky static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1133b4c598a6SLeon Romanovsky {
1134b4c598a6SLeon Romanovsky 	/*
1135b4c598a6SLeon Romanovsky 	 * There is no need to take lock, because
113637eeab55SParav Pandit 	 * we are relying on ib_core's locking.
1137b4c598a6SLeon Romanovsky 	 */
1138b4c598a6SLeon Romanovsky 	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
1139b4c598a6SLeon Romanovsky }
1140b4c598a6SLeon Romanovsky 
1141c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1142c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
1143c3f66f7bSLeon Romanovsky {
1144c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1145c3f66f7bSLeon Romanovsky 	struct ib_device *device;
1146c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
1147c3f66f7bSLeon Romanovsky 	u32 index;
1148c3f66f7bSLeon Romanovsky 	u32 port;
1149c3f66f7bSLeon Romanovsky 	int err;
1150c3f66f7bSLeon Romanovsky 
11518cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1152c3f66f7bSLeon Romanovsky 				     nldev_policy, extack);
1153287683d0SLeon Romanovsky 	if (err ||
1154287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1155287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
1156c3f66f7bSLeon Romanovsky 		return -EINVAL;
1157c3f66f7bSLeon Romanovsky 
1158c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
115937eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1160c3f66f7bSLeon Romanovsky 	if (!device)
1161c3f66f7bSLeon Romanovsky 		return -EINVAL;
1162c3f66f7bSLeon Romanovsky 
1163c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1164f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
1165f8978bd9SLeon Romanovsky 		err = -EINVAL;
1166f8978bd9SLeon Romanovsky 		goto err;
1167f8978bd9SLeon Romanovsky 	}
1168c3f66f7bSLeon Romanovsky 
1169c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1170f8978bd9SLeon Romanovsky 	if (!msg) {
1171f8978bd9SLeon Romanovsky 		err = -ENOMEM;
1172f8978bd9SLeon Romanovsky 		goto err;
1173f8978bd9SLeon Romanovsky 	}
1174c3f66f7bSLeon Romanovsky 
1175c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1176c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1177c3f66f7bSLeon Romanovsky 			0, 0);
1178c3f66f7bSLeon Romanovsky 
11795b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
1180f8978bd9SLeon Romanovsky 	if (err)
1181f8978bd9SLeon Romanovsky 		goto err_free;
1182c3f66f7bSLeon Romanovsky 
1183c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
118401b67117SParav Pandit 	ib_device_put(device);
1185c3f66f7bSLeon Romanovsky 
11861d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1187f8978bd9SLeon Romanovsky 
1188f8978bd9SLeon Romanovsky err_free:
1189f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
1190f8978bd9SLeon Romanovsky err:
119101b67117SParav Pandit 	ib_device_put(device);
1192f8978bd9SLeon Romanovsky 	return err;
1193c3f66f7bSLeon Romanovsky }
1194c3f66f7bSLeon Romanovsky 
11957d02f605SLeon Romanovsky static int nldev_port_get_dumpit(struct sk_buff *skb,
11967d02f605SLeon Romanovsky 				 struct netlink_callback *cb)
11977d02f605SLeon Romanovsky {
11987d02f605SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
11997d02f605SLeon Romanovsky 	struct ib_device *device;
12007d02f605SLeon Romanovsky 	int start = cb->args[0];
12017d02f605SLeon Romanovsky 	struct nlmsghdr *nlh;
12027d02f605SLeon Romanovsky 	u32 idx = 0;
12037d02f605SLeon Romanovsky 	u32 ifindex;
12047d02f605SLeon Romanovsky 	int err;
1205ea1075edSJason Gunthorpe 	unsigned int p;
12067d02f605SLeon Romanovsky 
12078cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
12087d02f605SLeon Romanovsky 				     nldev_policy, NULL);
12097d02f605SLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
12107d02f605SLeon Romanovsky 		return -EINVAL;
12117d02f605SLeon Romanovsky 
12127d02f605SLeon Romanovsky 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
121337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
12147d02f605SLeon Romanovsky 	if (!device)
12157d02f605SLeon Romanovsky 		return -EINVAL;
12167d02f605SLeon Romanovsky 
1217ea1075edSJason Gunthorpe 	rdma_for_each_port (device, p) {
12187d02f605SLeon Romanovsky 		/*
12197d02f605SLeon Romanovsky 		 * The dumpit function returns all information from specific
12207d02f605SLeon Romanovsky 		 * index. This specific index is taken from the netlink
12217d02f605SLeon Romanovsky 		 * messages request sent by user and it is available
12227d02f605SLeon Romanovsky 		 * in cb->args[0].
12237d02f605SLeon Romanovsky 		 *
12247d02f605SLeon Romanovsky 		 * Usually, the user doesn't fill this field and it causes
12257d02f605SLeon Romanovsky 		 * to return everything.
12267d02f605SLeon Romanovsky 		 *
12277d02f605SLeon Romanovsky 		 */
12287d02f605SLeon Romanovsky 		if (idx < start) {
12297d02f605SLeon Romanovsky 			idx++;
12307d02f605SLeon Romanovsky 			continue;
12317d02f605SLeon Romanovsky 		}
12327d02f605SLeon Romanovsky 
12337d02f605SLeon Romanovsky 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
12347d02f605SLeon Romanovsky 				cb->nlh->nlmsg_seq,
12357d02f605SLeon Romanovsky 				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
12367d02f605SLeon Romanovsky 						 RDMA_NLDEV_CMD_PORT_GET),
12377d02f605SLeon Romanovsky 				0, NLM_F_MULTI);
12387d02f605SLeon Romanovsky 
12395b2cc79dSLeon Romanovsky 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
12407d02f605SLeon Romanovsky 			nlmsg_cancel(skb, nlh);
12417d02f605SLeon Romanovsky 			goto out;
12427d02f605SLeon Romanovsky 		}
12437d02f605SLeon Romanovsky 		idx++;
12447d02f605SLeon Romanovsky 		nlmsg_end(skb, nlh);
12457d02f605SLeon Romanovsky 	}
12467d02f605SLeon Romanovsky 
1247f8978bd9SLeon Romanovsky out:
124801b67117SParav Pandit 	ib_device_put(device);
1249f8978bd9SLeon Romanovsky 	cb->args[0] = idx;
12507d02f605SLeon Romanovsky 	return skb->len;
12517d02f605SLeon Romanovsky }
12527d02f605SLeon Romanovsky 
1253bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1254bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
1255bf3c5a93SLeon Romanovsky {
1256bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1257bf3c5a93SLeon Romanovsky 	struct ib_device *device;
1258bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
1259bf3c5a93SLeon Romanovsky 	u32 index;
1260bf3c5a93SLeon Romanovsky 	int ret;
1261bf3c5a93SLeon Romanovsky 
12628cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1263bf3c5a93SLeon Romanovsky 				     nldev_policy, extack);
1264bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1265bf3c5a93SLeon Romanovsky 		return -EINVAL;
1266bf3c5a93SLeon Romanovsky 
1267bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
126837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1269bf3c5a93SLeon Romanovsky 	if (!device)
1270bf3c5a93SLeon Romanovsky 		return -EINVAL;
1271bf3c5a93SLeon Romanovsky 
1272bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1273f34727a1SDan Carpenter 	if (!msg) {
1274f34727a1SDan Carpenter 		ret = -ENOMEM;
1275bf3c5a93SLeon Romanovsky 		goto err;
1276f34727a1SDan Carpenter 	}
1277bf3c5a93SLeon Romanovsky 
1278bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1279bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1280bf3c5a93SLeon Romanovsky 			0, 0);
1281bf3c5a93SLeon Romanovsky 
1282bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
1283bf3c5a93SLeon Romanovsky 	if (ret)
1284bf3c5a93SLeon Romanovsky 		goto err_free;
1285bf3c5a93SLeon Romanovsky 
1286bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
128701b67117SParav Pandit 	ib_device_put(device);
12881d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1289bf3c5a93SLeon Romanovsky 
1290bf3c5a93SLeon Romanovsky err_free:
1291bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
1292bf3c5a93SLeon Romanovsky err:
129301b67117SParav Pandit 	ib_device_put(device);
1294bf3c5a93SLeon Romanovsky 	return ret;
1295bf3c5a93SLeon Romanovsky }
1296bf3c5a93SLeon Romanovsky 
1297bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
1298bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
1299bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
1300bf3c5a93SLeon Romanovsky 				 unsigned int idx)
1301bf3c5a93SLeon Romanovsky {
1302bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
1303bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
1304bf3c5a93SLeon Romanovsky 
1305bf3c5a93SLeon Romanovsky 	if (idx < start)
1306bf3c5a93SLeon Romanovsky 		return 0;
1307bf3c5a93SLeon Romanovsky 
1308bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1309bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1310bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
1311bf3c5a93SLeon Romanovsky 
1312bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
1313bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
1314bf3c5a93SLeon Romanovsky 		goto out;
1315bf3c5a93SLeon Romanovsky 	}
1316bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
1317bf3c5a93SLeon Romanovsky 
1318bf3c5a93SLeon Romanovsky 	idx++;
1319bf3c5a93SLeon Romanovsky 
1320bf3c5a93SLeon Romanovsky out:
1321bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
1322bf3c5a93SLeon Romanovsky 	return skb->len;
1323bf3c5a93SLeon Romanovsky }
1324bf3c5a93SLeon Romanovsky 
/*
 * Dump the resource summary of every registered ib_device; device
 * locking is handled inside ib_enum_all_devs().
 */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
1330bf3c5a93SLeon Romanovsky 
/*
 * Per-restrack-type description of how objects are reported over
 * netlink: which attribute opens the containing table, which nests
 * each entry, and which attribute carries the object ID in requests.
 */
struct nldev_fill_res_entry {
	enum rdma_nldev_attr nldev_attr;	/* table container attribute */
	u8 flags;				/* enum nldev_res_flags bits */
	u32 entry;				/* per-object nest attribute */
	u32 id;					/* attribute holding object ID */
};

enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0,	/* object belongs to the device, not a port */
};
1341d12ff624SSteve Wise 
/*
 * Attribute/ID wiring for every restrack type handled by the common
 * res_get_common_doit()/res_get_common_dumpit() paths.  Types without
 * NLDEV_PER_DEV require a valid port index in the request.
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
	[RDMA_RESTRACK_CTX] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CTXN,
	},
	[RDMA_RESTRACK_SRQ] = {
		.nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_SRQN,
	},

};
1390d12ff624SSteve Wise 
1391c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1392c5dfe0eaSLeon Romanovsky 			       struct netlink_ext_ack *extack,
1393fb910690SErez Alfasi 			       enum rdma_restrack_type res_type,
1394fb910690SErez Alfasi 			       res_fill_func_t fill_func)
1395c5dfe0eaSLeon Romanovsky {
1396c5dfe0eaSLeon Romanovsky 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1397c5dfe0eaSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1398c5dfe0eaSLeon Romanovsky 	struct rdma_restrack_entry *res;
1399c5dfe0eaSLeon Romanovsky 	struct ib_device *device;
1400c5dfe0eaSLeon Romanovsky 	u32 index, id, port = 0;
1401c5dfe0eaSLeon Romanovsky 	bool has_cap_net_admin;
1402c5dfe0eaSLeon Romanovsky 	struct sk_buff *msg;
1403c5dfe0eaSLeon Romanovsky 	int ret;
1404c5dfe0eaSLeon Romanovsky 
14058cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1406c5dfe0eaSLeon Romanovsky 				     nldev_policy, extack);
1407c5dfe0eaSLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
1408c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1409c5dfe0eaSLeon Romanovsky 
1410c5dfe0eaSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
141137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1412c5dfe0eaSLeon Romanovsky 	if (!device)
1413c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1414c5dfe0eaSLeon Romanovsky 
1415c5dfe0eaSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1416c5dfe0eaSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1417c5dfe0eaSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1418c5dfe0eaSLeon Romanovsky 			ret = -EINVAL;
1419c5dfe0eaSLeon Romanovsky 			goto err;
1420c5dfe0eaSLeon Romanovsky 		}
1421c5dfe0eaSLeon Romanovsky 	}
1422c5dfe0eaSLeon Romanovsky 
1423c5dfe0eaSLeon Romanovsky 	if ((port && fe->flags & NLDEV_PER_DEV) ||
1424c5dfe0eaSLeon Romanovsky 	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
1425c5dfe0eaSLeon Romanovsky 		ret = -EINVAL;
1426c5dfe0eaSLeon Romanovsky 		goto err;
1427c5dfe0eaSLeon Romanovsky 	}
1428c5dfe0eaSLeon Romanovsky 
1429c5dfe0eaSLeon Romanovsky 	id = nla_get_u32(tb[fe->id]);
1430c5dfe0eaSLeon Romanovsky 	res = rdma_restrack_get_byid(device, res_type, id);
1431c5dfe0eaSLeon Romanovsky 	if (IS_ERR(res)) {
1432c5dfe0eaSLeon Romanovsky 		ret = PTR_ERR(res);
1433c5dfe0eaSLeon Romanovsky 		goto err;
1434c5dfe0eaSLeon Romanovsky 	}
1435c5dfe0eaSLeon Romanovsky 
1436c5dfe0eaSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1437c5dfe0eaSLeon Romanovsky 	if (!msg) {
1438c5dfe0eaSLeon Romanovsky 		ret = -ENOMEM;
1439ab59ca3eSChristophe JAILLET 		goto err_get;
1440c5dfe0eaSLeon Romanovsky 	}
1441c5dfe0eaSLeon Romanovsky 
1442c5dfe0eaSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
144365959522SMaor Gottlieb 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
144465959522SMaor Gottlieb 					 RDMA_NL_GET_OP(nlh->nlmsg_type)),
1445c5dfe0eaSLeon Romanovsky 			0, 0);
1446c5dfe0eaSLeon Romanovsky 
1447c5dfe0eaSLeon Romanovsky 	if (fill_nldev_handle(msg, device)) {
1448c5dfe0eaSLeon Romanovsky 		ret = -EMSGSIZE;
1449c5dfe0eaSLeon Romanovsky 		goto err_free;
1450c5dfe0eaSLeon Romanovsky 	}
1451c5dfe0eaSLeon Romanovsky 
1452c5dfe0eaSLeon Romanovsky 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1453fb910690SErez Alfasi 
1454fb910690SErez Alfasi 	ret = fill_func(msg, has_cap_net_admin, res, port);
1455c5dfe0eaSLeon Romanovsky 	if (ret)
1456c5dfe0eaSLeon Romanovsky 		goto err_free;
1457c5dfe0eaSLeon Romanovsky 
145850bbe3d3SMaor Gottlieb 	rdma_restrack_put(res);
1459c5dfe0eaSLeon Romanovsky 	nlmsg_end(msg, nlh);
1460c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
14611d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1462c5dfe0eaSLeon Romanovsky 
1463c5dfe0eaSLeon Romanovsky err_free:
1464c5dfe0eaSLeon Romanovsky 	nlmsg_free(msg);
1465c5dfe0eaSLeon Romanovsky err_get:
1466c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1467c5dfe0eaSLeon Romanovsky err:
1468c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1469c5dfe0eaSLeon Romanovsky 	return ret;
1470c5dfe0eaSLeon Romanovsky }
1471c5dfe0eaSLeon Romanovsky 
1472d12ff624SSteve Wise static int res_get_common_dumpit(struct sk_buff *skb,
1473d12ff624SSteve Wise 				 struct netlink_callback *cb,
1474fb910690SErez Alfasi 				 enum rdma_restrack_type res_type,
1475fb910690SErez Alfasi 				 res_fill_func_t fill_func)
1476b5fa635aSLeon Romanovsky {
1477d12ff624SSteve Wise 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1478b5fa635aSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1479b5fa635aSLeon Romanovsky 	struct rdma_restrack_entry *res;
14807c77c6a9SLeon Romanovsky 	struct rdma_restrack_root *rt;
1481b5fa635aSLeon Romanovsky 	int err, ret = 0, idx = 0;
1482b5fa635aSLeon Romanovsky 	struct nlattr *table_attr;
1483c5dfe0eaSLeon Romanovsky 	struct nlattr *entry_attr;
1484b5fa635aSLeon Romanovsky 	struct ib_device *device;
1485b5fa635aSLeon Romanovsky 	int start = cb->args[0];
1486659067b0SLeon Romanovsky 	bool has_cap_net_admin;
1487b5fa635aSLeon Romanovsky 	struct nlmsghdr *nlh;
1488fd47c2f9SLeon Romanovsky 	unsigned long id;
1489b5fa635aSLeon Romanovsky 	u32 index, port = 0;
1490d12ff624SSteve Wise 	bool filled = false;
1491b5fa635aSLeon Romanovsky 
14928cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1493b5fa635aSLeon Romanovsky 				     nldev_policy, NULL);
1494b5fa635aSLeon Romanovsky 	/*
1495d12ff624SSteve Wise 	 * Right now, we are expecting the device index to get res information,
1496b5fa635aSLeon Romanovsky 	 * but it is possible to extend this code to return all devices in
1497b5fa635aSLeon Romanovsky 	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
1498b5fa635aSLeon Romanovsky 	 * if it doesn't exist, we will iterate over all devices.
1499b5fa635aSLeon Romanovsky 	 *
1500b5fa635aSLeon Romanovsky 	 * But it is not needed for now.
1501b5fa635aSLeon Romanovsky 	 */
1502b5fa635aSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1503b5fa635aSLeon Romanovsky 		return -EINVAL;
1504b5fa635aSLeon Romanovsky 
1505b5fa635aSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
150637eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1507b5fa635aSLeon Romanovsky 	if (!device)
1508b5fa635aSLeon Romanovsky 		return -EINVAL;
1509b5fa635aSLeon Romanovsky 
1510b5fa635aSLeon Romanovsky 	/*
1511b5fa635aSLeon Romanovsky 	 * If no PORT_INDEX is supplied, we will return all QPs from that device
1512b5fa635aSLeon Romanovsky 	 */
1513b5fa635aSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1514b5fa635aSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1515b5fa635aSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1516b5fa635aSLeon Romanovsky 			ret = -EINVAL;
1517b5fa635aSLeon Romanovsky 			goto err_index;
1518b5fa635aSLeon Romanovsky 		}
1519b5fa635aSLeon Romanovsky 	}
1520b5fa635aSLeon Romanovsky 
1521b5fa635aSLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
152265959522SMaor Gottlieb 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
152365959522SMaor Gottlieb 					 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
1524b5fa635aSLeon Romanovsky 			0, NLM_F_MULTI);
1525b5fa635aSLeon Romanovsky 
1526b5fa635aSLeon Romanovsky 	if (fill_nldev_handle(skb, device)) {
1527b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1528b5fa635aSLeon Romanovsky 		goto err;
1529b5fa635aSLeon Romanovsky 	}
1530b5fa635aSLeon Romanovsky 
1531ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
1532b5fa635aSLeon Romanovsky 	if (!table_attr) {
1533b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1534b5fa635aSLeon Romanovsky 		goto err;
1535b5fa635aSLeon Romanovsky 	}
1536b5fa635aSLeon Romanovsky 
1537659067b0SLeon Romanovsky 	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
1538659067b0SLeon Romanovsky 
15397c77c6a9SLeon Romanovsky 	rt = &device->res[res_type];
15407c77c6a9SLeon Romanovsky 	xa_lock(&rt->xa);
1541fd47c2f9SLeon Romanovsky 	/*
1542fd47c2f9SLeon Romanovsky 	 * FIXME: if the skip ahead is something common this loop should
1543fd47c2f9SLeon Romanovsky 	 * use xas_for_each & xas_pause to optimize, we can have a lot of
1544fd47c2f9SLeon Romanovsky 	 * objects.
1545fd47c2f9SLeon Romanovsky 	 */
15467c77c6a9SLeon Romanovsky 	xa_for_each(&rt->xa, id, res) {
1547f2a0e45fSLeon Romanovsky 		if (idx < start || !rdma_restrack_get(res))
1548b5fa635aSLeon Romanovsky 			goto next;
1549b5fa635aSLeon Romanovsky 
15507c77c6a9SLeon Romanovsky 		xa_unlock(&rt->xa);
15517c77c6a9SLeon Romanovsky 
1552d12ff624SSteve Wise 		filled = true;
1553b5fa635aSLeon Romanovsky 
1554ae0be8deSMichal Kubecek 		entry_attr = nla_nest_start_noflag(skb, fe->entry);
1555c5dfe0eaSLeon Romanovsky 		if (!entry_attr) {
1556c5dfe0eaSLeon Romanovsky 			ret = -EMSGSIZE;
1557c5dfe0eaSLeon Romanovsky 			rdma_restrack_put(res);
15587c77c6a9SLeon Romanovsky 			goto msg_full;
1559c5dfe0eaSLeon Romanovsky 		}
1560c5dfe0eaSLeon Romanovsky 
1561fb910690SErez Alfasi 		ret = fill_func(skb, has_cap_net_admin, res, port);
1562fb910690SErez Alfasi 
1563b5fa635aSLeon Romanovsky 		rdma_restrack_put(res);
1564b5fa635aSLeon Romanovsky 
15657c77c6a9SLeon Romanovsky 		if (ret) {
1566c5dfe0eaSLeon Romanovsky 			nla_nest_cancel(skb, entry_attr);
1567b5fa635aSLeon Romanovsky 			if (ret == -EMSGSIZE)
15687c77c6a9SLeon Romanovsky 				goto msg_full;
1569c5dfe0eaSLeon Romanovsky 			if (ret == -EAGAIN)
15707c77c6a9SLeon Romanovsky 				goto again;
1571b5fa635aSLeon Romanovsky 			goto res_err;
15727c77c6a9SLeon Romanovsky 		}
1573c5dfe0eaSLeon Romanovsky 		nla_nest_end(skb, entry_attr);
15747c77c6a9SLeon Romanovsky again:		xa_lock(&rt->xa);
1575b5fa635aSLeon Romanovsky next:		idx++;
1576b5fa635aSLeon Romanovsky 	}
15777c77c6a9SLeon Romanovsky 	xa_unlock(&rt->xa);
1578b5fa635aSLeon Romanovsky 
15797c77c6a9SLeon Romanovsky msg_full:
1580b5fa635aSLeon Romanovsky 	nla_nest_end(skb, table_attr);
1581b5fa635aSLeon Romanovsky 	nlmsg_end(skb, nlh);
1582b5fa635aSLeon Romanovsky 	cb->args[0] = idx;
1583b5fa635aSLeon Romanovsky 
1584b5fa635aSLeon Romanovsky 	/*
1585d12ff624SSteve Wise 	 * No more entries to fill, cancel the message and
1586b5fa635aSLeon Romanovsky 	 * return 0 to mark end of dumpit.
1587b5fa635aSLeon Romanovsky 	 */
1588d12ff624SSteve Wise 	if (!filled)
1589b5fa635aSLeon Romanovsky 		goto err;
1590b5fa635aSLeon Romanovsky 
159101b67117SParav Pandit 	ib_device_put(device);
1592b5fa635aSLeon Romanovsky 	return skb->len;
1593b5fa635aSLeon Romanovsky 
1594b5fa635aSLeon Romanovsky res_err:
1595b5fa635aSLeon Romanovsky 	nla_nest_cancel(skb, table_attr);
1596b5fa635aSLeon Romanovsky 
1597b5fa635aSLeon Romanovsky err:
1598b5fa635aSLeon Romanovsky 	nlmsg_cancel(skb, nlh);
1599b5fa635aSLeon Romanovsky 
1600b5fa635aSLeon Romanovsky err_index:
160101b67117SParav Pandit 	ib_device_put(device);
1602b5fa635aSLeon Romanovsky 	return ret;
1603b5fa635aSLeon Romanovsky }
1604b5fa635aSLeon Romanovsky 
/*
 * RES_GET_FUNCS(name, type) expands to the doit and dumpit netlink
 * handlers for one restrack object type; both simply forward to the
 * common helpers together with the matching fill_res_<name>_entry
 * fill callback.
 */
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type,                    \
					     fill_res_##name##_entry);         \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type,             \
					   fill_res_##name##_entry);           \
	}
1619d12ff624SSteve Wise 
/*
 * Instantiate the doit/dumpit handler pairs.  The *_raw variants share
 * a restrack type with their plain counterpart but use a different
 * fill callback.
 */
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
163129cf1351SSteve Wise 
/* Registry of rdma_link_ops providers; all access is under link_ops_rwsem. */
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
16343856ec4bSSteve Wise 
16353856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
16363856ec4bSSteve Wise {
16373856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
16383856ec4bSSteve Wise 
16393856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
16403856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
16413856ec4bSSteve Wise 			goto out;
16423856ec4bSSteve Wise 	}
16433856ec4bSSteve Wise 	ops = NULL;
16443856ec4bSSteve Wise out:
16453856ec4bSSteve Wise 	return ops;
16463856ec4bSSteve Wise }
16473856ec4bSSteve Wise 
16483856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
16493856ec4bSSteve Wise {
16503856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1651afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
16523856ec4bSSteve Wise 		goto out;
16533856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
16543856ec4bSSteve Wise out:
16553856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
16563856ec4bSSteve Wise }
16573856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
16583856ec4bSSteve Wise 
/* Remove a previously registered link-type provider from the registry. */
void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
16663856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_NEWLINK handler: create a soft RDMA device of the
 * requested link type (e.g. rxe, siw) on top of the named netdev.
 * Requires RDMA_NLDEV_ATTR_DEV_NAME, _LINK_TYPE and _NDEV_NAME.
 */
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	/* Reject empty names and printf-style '%' templates */
	if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
		return -EINVAL;

	nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	/*
	 * Unknown type: drop the lock, try to auto-load the matching
	 * "rdma-link-<type>" module, then look the type up again.
	 */
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}
17133856ec4bSSteve Wise 
17143856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
17153856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
17163856ec4bSSteve Wise {
17173856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
17183856ec4bSSteve Wise 	struct ib_device *device;
17193856ec4bSSteve Wise 	u32 index;
17203856ec4bSSteve Wise 	int err;
17213856ec4bSSteve Wise 
17228cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
17233856ec4bSSteve Wise 				     nldev_policy, extack);
17243856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
17253856ec4bSSteve Wise 		return -EINVAL;
17263856ec4bSSteve Wise 
17273856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
172837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
17293856ec4bSSteve Wise 	if (!device)
17303856ec4bSSteve Wise 		return -EINVAL;
17313856ec4bSSteve Wise 
17323856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
17333856ec4bSSteve Wise 		ib_device_put(device);
17343856ec4bSSteve Wise 		return -EINVAL;
17353856ec4bSSteve Wise 	}
17363856ec4bSSteve Wise 
17373856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
17383856ec4bSSteve Wise 	return 0;
17393856ec4bSSteve Wise }
17403856ec4bSSteve Wise 
/*
 * RDMA_NLDEV_CMD_GET_CHARDEV handler: report the char device (dev_t, ABI
 * version and device name) that a kernel client (named by the
 * RDMA_NLDEV_ATTR_CHARDEV_TYPE string) exposes for an optional
 * device/port.  A port index without a device index is rejected.
 */
static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	/* Bounded copy of the client name; nla_strscpy() NUL-terminates */
	nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		/* ib_device_get_by_index() takes a reference; dropped below */
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			/* -1 signals "no specific port" to the client */
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		/* A port without a device is meaningless */
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);

	data.nl_msg = msg;
	/*
	 * On success data.cdev carries a device reference we must release
	 * with put_device() on every subsequent path.
	 */
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	/* dev_t is encoded as a 64-bit value for user space */
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}
18240e2d00ebSJason Gunthorpe 
18254d7ba8ceSParav Pandit static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
18264d7ba8ceSParav Pandit 			      struct netlink_ext_ack *extack)
1827cb7e0e13SParav Pandit {
1828cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
18294d7ba8ceSParav Pandit 	struct sk_buff *msg;
1830cb7e0e13SParav Pandit 	int err;
1831cb7e0e13SParav Pandit 
18324d7ba8ceSParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
18334d7ba8ceSParav Pandit 			  nldev_policy, extack);
1834cb7e0e13SParav Pandit 	if (err)
1835cb7e0e13SParav Pandit 		return err;
1836cb7e0e13SParav Pandit 
18374d7ba8ceSParav Pandit 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
18384d7ba8ceSParav Pandit 	if (!msg)
18394d7ba8ceSParav Pandit 		return -ENOMEM;
18404d7ba8ceSParav Pandit 
18414d7ba8ceSParav Pandit 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1842cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1843cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1844cb7e0e13SParav Pandit 			0, 0);
1845cb7e0e13SParav Pandit 
18464d7ba8ceSParav Pandit 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1847cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1848cb7e0e13SParav Pandit 	if (err) {
18494d7ba8ceSParav Pandit 		nlmsg_free(msg);
1850cb7e0e13SParav Pandit 		return err;
1851cb7e0e13SParav Pandit 	}
18524d7ba8ceSParav Pandit 	nlmsg_end(msg, nlh);
18531d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1854cb7e0e13SParav Pandit }
1855cb7e0e13SParav Pandit 
18562b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
18572b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
18582b34c558SParav Pandit {
18592b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
18602b34c558SParav Pandit 	u8 enable;
18612b34c558SParav Pandit 	int err;
18622b34c558SParav Pandit 
18632b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
18642b34c558SParav Pandit 			  nldev_policy, extack);
18652b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
18662b34c558SParav Pandit 		return -EINVAL;
18672b34c558SParav Pandit 
18682b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
18692b34c558SParav Pandit 	/* Only 0 and 1 are supported */
18702b34c558SParav Pandit 	if (enable > 1)
18712b34c558SParav Pandit 		return -EINVAL;
18722b34c558SParav Pandit 
18732b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
18742b34c558SParav Pandit 	return err;
18752b34c558SParav Pandit }
18762b34c558SParav Pandit 
1877b47ae6f8SMark Zhang static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1878b47ae6f8SMark Zhang 			       struct netlink_ext_ack *extack)
1879b47ae6f8SMark Zhang {
1880b389327dSMark Zhang 	u32 index, port, mode, mask = 0, qpn, cntn = 0;
1881b47ae6f8SMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1882b47ae6f8SMark Zhang 	struct ib_device *device;
1883b47ae6f8SMark Zhang 	struct sk_buff *msg;
1884b47ae6f8SMark Zhang 	int ret;
1885b47ae6f8SMark Zhang 
1886b47ae6f8SMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1887b47ae6f8SMark Zhang 			  nldev_policy, extack);
1888b47ae6f8SMark Zhang 	/* Currently only counter for QP is supported */
1889b47ae6f8SMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1890b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1891b47ae6f8SMark Zhang 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
1892b47ae6f8SMark Zhang 		return -EINVAL;
1893b47ae6f8SMark Zhang 
1894b47ae6f8SMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1895b47ae6f8SMark Zhang 		return -EINVAL;
1896b47ae6f8SMark Zhang 
1897b47ae6f8SMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1898b47ae6f8SMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1899b47ae6f8SMark Zhang 	if (!device)
1900b47ae6f8SMark Zhang 		return -EINVAL;
1901b47ae6f8SMark Zhang 
1902b47ae6f8SMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1903b47ae6f8SMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1904b47ae6f8SMark Zhang 		ret = -EINVAL;
1905b47ae6f8SMark Zhang 		goto err;
1906b47ae6f8SMark Zhang 	}
1907b47ae6f8SMark Zhang 
1908b47ae6f8SMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1909b47ae6f8SMark Zhang 	if (!msg) {
1910b47ae6f8SMark Zhang 		ret = -ENOMEM;
1911b47ae6f8SMark Zhang 		goto err;
1912b47ae6f8SMark Zhang 	}
1913b47ae6f8SMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1914b47ae6f8SMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1915b47ae6f8SMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
1916b47ae6f8SMark Zhang 			0, 0);
1917b47ae6f8SMark Zhang 
1918b47ae6f8SMark Zhang 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
1919b389327dSMark Zhang 	if (mode == RDMA_COUNTER_MODE_AUTO) {
1920b47ae6f8SMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
1921b389327dSMark Zhang 			mask = nla_get_u32(
1922b389327dSMark Zhang 				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
192333eb12f2SPatrisious Haddad 		ret = rdma_counter_set_auto_mode(device, port, mask, extack);
1924b47ae6f8SMark Zhang 		if (ret)
1925b47ae6f8SMark Zhang 			goto err_msg;
1926b389327dSMark Zhang 	} else {
192778f34a16SMark Zhang 		if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
192878f34a16SMark Zhang 			goto err_msg;
1929b389327dSMark Zhang 		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
1930b389327dSMark Zhang 		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
1931b389327dSMark Zhang 			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
1932b389327dSMark Zhang 			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
1933b389327dSMark Zhang 		} else {
1934b389327dSMark Zhang 			ret = rdma_counter_bind_qpn_alloc(device, port,
1935b389327dSMark Zhang 							  qpn, &cntn);
1936b389327dSMark Zhang 		}
1937b389327dSMark Zhang 		if (ret)
1938b47ae6f8SMark Zhang 			goto err_msg;
1939b389327dSMark Zhang 
1940b389327dSMark Zhang 		if (fill_nldev_handle(msg, device) ||
1941b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1942b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
1943b389327dSMark Zhang 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
1944b389327dSMark Zhang 			ret = -EMSGSIZE;
1945b389327dSMark Zhang 			goto err_fill;
1946b389327dSMark Zhang 		}
1947b47ae6f8SMark Zhang 	}
1948b47ae6f8SMark Zhang 
1949b47ae6f8SMark Zhang 	nlmsg_end(msg, nlh);
1950b47ae6f8SMark Zhang 	ib_device_put(device);
19511d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1952b47ae6f8SMark Zhang 
1953b389327dSMark Zhang err_fill:
1954b389327dSMark Zhang 	rdma_counter_unbind_qpn(device, port, qpn, cntn);
1955b47ae6f8SMark Zhang err_msg:
1956b47ae6f8SMark Zhang 	nlmsg_free(msg);
1957b47ae6f8SMark Zhang err:
1958b47ae6f8SMark Zhang 	ib_device_put(device);
1959b47ae6f8SMark Zhang 	return ret;
1960b47ae6f8SMark Zhang }
1961b47ae6f8SMark Zhang 
1962b389327dSMark Zhang static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1963b389327dSMark Zhang 			       struct netlink_ext_ack *extack)
1964b389327dSMark Zhang {
1965b389327dSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1966b389327dSMark Zhang 	struct ib_device *device;
1967b389327dSMark Zhang 	struct sk_buff *msg;
1968b389327dSMark Zhang 	u32 index, port, qpn, cntn;
1969b389327dSMark Zhang 	int ret;
1970b389327dSMark Zhang 
1971b389327dSMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1972b389327dSMark Zhang 			  nldev_policy, extack);
1973b389327dSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1974b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
1975b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
1976b389327dSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
1977b389327dSMark Zhang 		return -EINVAL;
1978b389327dSMark Zhang 
1979b389327dSMark Zhang 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1980b389327dSMark Zhang 		return -EINVAL;
1981b389327dSMark Zhang 
1982b389327dSMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1983b389327dSMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1984b389327dSMark Zhang 	if (!device)
1985b389327dSMark Zhang 		return -EINVAL;
1986b389327dSMark Zhang 
1987b389327dSMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1988b389327dSMark Zhang 	if (!rdma_is_port_valid(device, port)) {
1989b389327dSMark Zhang 		ret = -EINVAL;
1990b389327dSMark Zhang 		goto err;
1991b389327dSMark Zhang 	}
1992b389327dSMark Zhang 
1993b389327dSMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1994b389327dSMark Zhang 	if (!msg) {
1995b389327dSMark Zhang 		ret = -ENOMEM;
1996b389327dSMark Zhang 		goto err;
1997b389327dSMark Zhang 	}
1998b389327dSMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1999b389327dSMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
2000b389327dSMark Zhang 					 RDMA_NLDEV_CMD_STAT_SET),
2001b389327dSMark Zhang 			0, 0);
2002b389327dSMark Zhang 
2003b389327dSMark Zhang 	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
2004b389327dSMark Zhang 	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
2005b389327dSMark Zhang 	if (fill_nldev_handle(msg, device) ||
2006b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
2007b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
2008b389327dSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
2009b389327dSMark Zhang 		ret = -EMSGSIZE;
2010b389327dSMark Zhang 		goto err_fill;
2011b389327dSMark Zhang 	}
2012b389327dSMark Zhang 
2013594e6c5dSLeon Romanovsky 	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
2014594e6c5dSLeon Romanovsky 	if (ret)
2015594e6c5dSLeon Romanovsky 		goto err_fill;
2016594e6c5dSLeon Romanovsky 
2017b389327dSMark Zhang 	nlmsg_end(msg, nlh);
2018b389327dSMark Zhang 	ib_device_put(device);
20191d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2020b389327dSMark Zhang 
2021b389327dSMark Zhang err_fill:
2022b389327dSMark Zhang 	nlmsg_free(msg);
2023b389327dSMark Zhang err:
2024b389327dSMark Zhang 	ib_device_put(device);
2025b389327dSMark Zhang 	return ret;
2026b389327dSMark Zhang }
2027b389327dSMark Zhang 
/*
 * RDMA_NLDEV_CMD_STAT_GET without a STAT_RES attribute: dump the per-port
 * default hardware counters of a device as a nested name/value table.
 * Each value reported is the port-wide value in port_data[].hw_stats plus
 * rdma_counter_get_hwstat_value() for that index (presumably the amount
 * accumulated by QP-bound counters -- confirm against counters.c).
 */
static int stat_get_doit_default_counter(struct sk_buff *skb,
					 struct nlmsghdr *nlh,
					 struct netlink_ext_ack *extack,
					 struct nlattr *tb[])
{
	struct rdma_hw_stats *stats;
	struct nlattr *table_attr;
	struct ib_device *device;
	int ret, num_cnts, i;
	struct sk_buff *msg;
	u32 index, port;
	u64 v;

	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* Default counters only exist for drivers with hw_stats support */
	if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
		ret = -EINVAL;
		goto err;
	}

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	stats = device->port_data ? device->port_data[port].hw_stats : NULL;
	if (stats == NULL) {
		ret = -EINVAL;
		goto err_msg;
	}
	/* Hold stats->lock while refreshing and reading the value array */
	mutex_lock(&stats->lock);

	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
	if (num_cnts < 0) {
		ret = -EINVAL;
		goto err_stats;
	}

	/* One name/value entry per counter, nested under HWCOUNTERS */
	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err_stats;
	}
	for (i = 0; i < num_cnts; i++) {
		v = stats->value[i] +
			rdma_counter_get_hwstat_value(device, port, i);
		if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
			ret = -EMSGSIZE;
			goto err_table;
		}
	}
	nla_nest_end(msg, table_attr);

	mutex_unlock(&stats->lock);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_table:
	nla_nest_cancel(msg, table_attr);
err_stats:
	mutex_unlock(&stats->lock);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
21206e7be47aSMark Zhang 
212183c2c1fcSMark Zhang static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
212283c2c1fcSMark Zhang 			    struct netlink_ext_ack *extack, struct nlattr *tb[])
212383c2c1fcSMark Zhang 
212483c2c1fcSMark Zhang {
212583c2c1fcSMark Zhang 	static enum rdma_nl_counter_mode mode;
212683c2c1fcSMark Zhang 	static enum rdma_nl_counter_mask mask;
212783c2c1fcSMark Zhang 	struct ib_device *device;
212883c2c1fcSMark Zhang 	struct sk_buff *msg;
212983c2c1fcSMark Zhang 	u32 index, port;
213083c2c1fcSMark Zhang 	int ret;
213183c2c1fcSMark Zhang 
213283c2c1fcSMark Zhang 	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
213383c2c1fcSMark Zhang 		return nldev_res_get_counter_doit(skb, nlh, extack);
213483c2c1fcSMark Zhang 
213583c2c1fcSMark Zhang 	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
213683c2c1fcSMark Zhang 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
213783c2c1fcSMark Zhang 		return -EINVAL;
213883c2c1fcSMark Zhang 
213983c2c1fcSMark Zhang 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
214083c2c1fcSMark Zhang 	device = ib_device_get_by_index(sock_net(skb->sk), index);
214183c2c1fcSMark Zhang 	if (!device)
214283c2c1fcSMark Zhang 		return -EINVAL;
214383c2c1fcSMark Zhang 
214483c2c1fcSMark Zhang 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
214583c2c1fcSMark Zhang 	if (!rdma_is_port_valid(device, port)) {
214683c2c1fcSMark Zhang 		ret = -EINVAL;
214783c2c1fcSMark Zhang 		goto err;
214883c2c1fcSMark Zhang 	}
214983c2c1fcSMark Zhang 
215083c2c1fcSMark Zhang 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
215183c2c1fcSMark Zhang 	if (!msg) {
215283c2c1fcSMark Zhang 		ret = -ENOMEM;
215383c2c1fcSMark Zhang 		goto err;
215483c2c1fcSMark Zhang 	}
215583c2c1fcSMark Zhang 
215683c2c1fcSMark Zhang 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
215783c2c1fcSMark Zhang 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
215883c2c1fcSMark Zhang 					 RDMA_NLDEV_CMD_STAT_GET),
215983c2c1fcSMark Zhang 			0, 0);
216083c2c1fcSMark Zhang 
216183c2c1fcSMark Zhang 	ret = rdma_counter_get_mode(device, port, &mode, &mask);
216283c2c1fcSMark Zhang 	if (ret)
216383c2c1fcSMark Zhang 		goto err_msg;
216483c2c1fcSMark Zhang 
216583c2c1fcSMark Zhang 	if (fill_nldev_handle(msg, device) ||
216683c2c1fcSMark Zhang 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
2167932727c5SDan Carpenter 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
2168932727c5SDan Carpenter 		ret = -EMSGSIZE;
216983c2c1fcSMark Zhang 		goto err_msg;
2170932727c5SDan Carpenter 	}
217183c2c1fcSMark Zhang 
217283c2c1fcSMark Zhang 	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
2173932727c5SDan Carpenter 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
2174932727c5SDan Carpenter 		ret = -EMSGSIZE;
217583c2c1fcSMark Zhang 		goto err_msg;
2176932727c5SDan Carpenter 	}
217783c2c1fcSMark Zhang 
217883c2c1fcSMark Zhang 	nlmsg_end(msg, nlh);
217983c2c1fcSMark Zhang 	ib_device_put(device);
21801d2fedd8SParav Pandit 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
218183c2c1fcSMark Zhang 
218283c2c1fcSMark Zhang err_msg:
218383c2c1fcSMark Zhang 	nlmsg_free(msg);
218483c2c1fcSMark Zhang err:
218583c2c1fcSMark Zhang 	ib_device_put(device);
218683c2c1fcSMark Zhang 	return ret;
218783c2c1fcSMark Zhang }
218883c2c1fcSMark Zhang 
2189c4ffee7cSMark Zhang static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2190c4ffee7cSMark Zhang 			       struct netlink_ext_ack *extack)
2191c4ffee7cSMark Zhang {
2192c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2193c4ffee7cSMark Zhang 	int ret;
2194c4ffee7cSMark Zhang 
2195c4ffee7cSMark Zhang 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2196c4ffee7cSMark Zhang 			  nldev_policy, extack);
21976e7be47aSMark Zhang 	if (ret)
2198c4ffee7cSMark Zhang 		return -EINVAL;
2199c4ffee7cSMark Zhang 
22006e7be47aSMark Zhang 	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
22016e7be47aSMark Zhang 		return stat_get_doit_default_counter(skb, nlh, extack, tb);
22026e7be47aSMark Zhang 
2203c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2204c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
220583c2c1fcSMark Zhang 		ret = stat_get_doit_qp(skb, nlh, extack, tb);
2206c4ffee7cSMark Zhang 		break;
22074061ff7aSErez Alfasi 	case RDMA_NLDEV_ATTR_RES_MR:
22084061ff7aSErez Alfasi 		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
22094061ff7aSErez Alfasi 					  fill_stat_mr_entry);
22104061ff7aSErez Alfasi 		break;
2211c4ffee7cSMark Zhang 	default:
2212c4ffee7cSMark Zhang 		ret = -EINVAL;
2213c4ffee7cSMark Zhang 		break;
2214c4ffee7cSMark Zhang 	}
2215c4ffee7cSMark Zhang 
2216c4ffee7cSMark Zhang 	return ret;
2217c4ffee7cSMark Zhang }
2218c4ffee7cSMark Zhang 
2219c4ffee7cSMark Zhang static int nldev_stat_get_dumpit(struct sk_buff *skb,
2220c4ffee7cSMark Zhang 				 struct netlink_callback *cb)
2221c4ffee7cSMark Zhang {
2222c4ffee7cSMark Zhang 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2223c4ffee7cSMark Zhang 	int ret;
2224c4ffee7cSMark Zhang 
2225c4ffee7cSMark Zhang 	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2226c4ffee7cSMark Zhang 			  nldev_policy, NULL);
2227c4ffee7cSMark Zhang 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
2228c4ffee7cSMark Zhang 		return -EINVAL;
2229c4ffee7cSMark Zhang 
2230c4ffee7cSMark Zhang 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2231c4ffee7cSMark Zhang 	case RDMA_NLDEV_ATTR_RES_QP:
2232c4ffee7cSMark Zhang 		ret = nldev_res_get_counter_dumpit(skb, cb);
2233c4ffee7cSMark Zhang 		break;
22344061ff7aSErez Alfasi 	case RDMA_NLDEV_ATTR_RES_MR:
22354061ff7aSErez Alfasi 		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
22364061ff7aSErez Alfasi 					    fill_stat_mr_entry);
22374061ff7aSErez Alfasi 		break;
2238c4ffee7cSMark Zhang 	default:
2239c4ffee7cSMark Zhang 		ret = -EINVAL;
2240c4ffee7cSMark Zhang 		break;
2241c4ffee7cSMark Zhang 	}
2242c4ffee7cSMark Zhang 
2243c4ffee7cSMark Zhang 	return ret;
2244c4ffee7cSMark Zhang }
2245c4ffee7cSMark Zhang 
/*
 * Dispatch table mapping RDMA_NLDEV_CMD_* netlink commands to their
 * .doit (request/response) and .dump (multi-part dump) handlers.
 * Entries flagged RDMA_NL_ADMIN_PERM are restricted to privileged users.
 */
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
		.doit = nldev_res_get_ctx_doit,
		.dump = nldev_res_get_ctx_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
		.doit = nldev_res_get_srq_doit,
		.dump = nldev_res_get_srq_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	/* Raw (driver-specific) resource dumps are privileged-only */
	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
		.doit = nldev_res_get_qp_raw_doit,
		.dump = nldev_res_get_qp_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
		.doit = nldev_res_get_cq_raw_doit,
		.dump = nldev_res_get_cq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
		.doit = nldev_res_get_mr_raw_doit,
		.dump = nldev_res_get_mr_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
2336b4c598a6SLeon Romanovsky 
/* Register the nldev command table with the RDMA netlink core at boot. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
23416c80b41aSLeon Romanovsky 
/* Unregister the nldev netlink client on module unload. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}
2346e3bf14bdSJason Gunthorpe 
2347e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
2348