xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision 0e2d00eb)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
446c80b41aSLeon Romanovsky 
45b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
46b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
47b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
48b4c598a6SLeon Romanovsky 					    .len = IB_DEVICE_NAME_MAX - 1},
49b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
508621a7e3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
518621a7e3SLeon Romanovsky 					    .len = IB_FW_VERSION_NAME_MAX - 1},
521aaff896SLeon Romanovsky 	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
531aaff896SLeon Romanovsky 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
5412026fbbSLeon Romanovsky 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
5580a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
5680a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
5734840feaSLeon Romanovsky 	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
585654e49dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
595654e49dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
601bb77b8cSLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
61bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
62bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
63bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
64bf3c5a93SLeon Romanovsky 					     .len = 16 },
65bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
66b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
67b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
68b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
69b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
70b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
71b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
72b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
73b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
74b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
75b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
76b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
77b5fa635aSLeon Romanovsky 						    .len = TASK_COMM_LEN },
7800313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
7900313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
8000313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
8100313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
8200313983SSteve Wise 			.len = sizeof(struct __kernel_sockaddr_storage) },
8300313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
8400313983SSteve Wise 			.len = sizeof(struct __kernel_sockaddr_storage) },
85a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
86a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
87a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
88a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
89a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
90fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
91fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
92fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
93fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
94fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
95fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
9629cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
9729cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
9829cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
9929cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
1005b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
1015b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
1025b2cc79dSLeon Romanovsky 						    .len = IFNAMSIZ },
103da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
104da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
105da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
106da5c8507SSteve Wise 				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
107da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
108da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
109da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
110da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
111da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
112517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
113517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CQN]               = { .type = NLA_U32 },
114517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_MRN]               = { .type = NLA_U32 },
115517b773eSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CM_IDN]            = { .type = NLA_U32 },
116c3d02788SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_CTXN]              = { .type = NLA_U32 },
1173856ec4bSSteve Wise 	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
1183856ec4bSSteve Wise 				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
119cb7e0e13SParav Pandit 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
1209e886b39SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
1219e886b39SLeon Romanovsky 				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
1222e5b8a01SParav Pandit 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
1230e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
1240e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
1250e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
1260e2d00ebSJason Gunthorpe 				    .len = 128 },
1270e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
1280e2d00ebSJason Gunthorpe 				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
129b4c598a6SLeon Romanovsky };
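
/*
 * A minimal sketch of the attribute-validation pattern used by the
 * doit/dumpit handlers below: each one parses the request against
 * nldev_policy before reading any attribute.
 *
 *	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 *				     nldev_policy, extack);
 *	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
 *		return -EINVAL;
 */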
130b4c598a6SLeon Romanovsky 
13173937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
13273937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
13373937e8aSSteve Wise {
13473937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
13573937e8aSSteve Wise 		return -EMSGSIZE;
13673937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
13773937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
13873937e8aSSteve Wise 		return -EMSGSIZE;
13973937e8aSSteve Wise 
14073937e8aSSteve Wise 	return 0;
14173937e8aSSteve Wise }
14273937e8aSSteve Wise 
14373937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
14473937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
14573937e8aSSteve Wise 				   u32 value)
14673937e8aSSteve Wise {
14773937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
14873937e8aSSteve Wise 		return -EMSGSIZE;
14973937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
15073937e8aSSteve Wise 		return -EMSGSIZE;
15173937e8aSSteve Wise 
15273937e8aSSteve Wise 	return 0;
15373937e8aSSteve Wise }
15473937e8aSSteve Wise 
15573937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
15673937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
15773937e8aSSteve Wise 				   u64 value)
15873937e8aSSteve Wise {
15973937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
16073937e8aSSteve Wise 		return -EMSGSIZE;
16173937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
16273937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
16373937e8aSSteve Wise 		return -EMSGSIZE;
16473937e8aSSteve Wise 
16573937e8aSSteve Wise 	return 0;
16673937e8aSSteve Wise }
16773937e8aSSteve Wise 
16873937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
16973937e8aSSteve Wise {
17073937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
17173937e8aSSteve Wise 				       value);
17273937e8aSSteve Wise }
17373937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32);
17473937e8aSSteve Wise 
17573937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
17673937e8aSSteve Wise 			       u32 value)
17773937e8aSSteve Wise {
17873937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
17973937e8aSSteve Wise 				       value);
18073937e8aSSteve Wise }
18173937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
18273937e8aSSteve Wise 
18373937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
18473937e8aSSteve Wise {
18573937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
18673937e8aSSteve Wise 				       value);
18773937e8aSSteve Wise }
18873937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64);
18973937e8aSSteve Wise 
19073937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
19173937e8aSSteve Wise {
19273937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
19373937e8aSSteve Wise 				       value);
19473937e8aSSteve Wise }
19573937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
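
/*
 * A hedged sketch (hypothetical driver code, not taken from any real
 * provider): a driver that implements the fill_res_entry callback
 * dispatched by fill_res_entry() below could use the exported helpers
 * above to attach driver-specific attributes. The function name and the
 * attribute names/values are made up for illustration.
 *
 *	static int foo_fill_res_entry(struct sk_buff *msg,
 *				      struct rdma_restrack_entry *res)
 *	{
 *		if (rdma_nl_put_driver_u32(msg, "sq_depth", 128))
 *			return -EMSGSIZE;
 *		if (rdma_nl_put_driver_u64_hex(msg, "db_addr", 0xdead0000))
 *			return -EMSGSIZE;
 *		return 0;
 *	}
 */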
19673937e8aSSteve Wise 
197c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
198b4c598a6SLeon Romanovsky {
199b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
200b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
201896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
202896de009SJason Gunthorpe 			   dev_name(&device->dev)))
203b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
204c2409810SLeon Romanovsky 
205c2409810SLeon Romanovsky 	return 0;
206c2409810SLeon Romanovsky }
207c2409810SLeon Romanovsky 
208c2409810SLeon Romanovsky static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
209c2409810SLeon Romanovsky {
210c2409810SLeon Romanovsky 	char fw[IB_FW_VERSION_NAME_MAX];
2119e886b39SLeon Romanovsky 	int ret = 0;
2129e886b39SLeon Romanovsky 	u8 port;
213c2409810SLeon Romanovsky 
214c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
215c2409810SLeon Romanovsky 		return -EMSGSIZE;
216c2409810SLeon Romanovsky 
217b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
218b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
219ac505253SLeon Romanovsky 
220ac505253SLeon Romanovsky 	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
221ac505253SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
22225a0ad85SSteve Wise 			      device->attrs.device_cap_flags,
22325a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
224ac505253SLeon Romanovsky 		return -EMSGSIZE;
225ac505253SLeon Romanovsky 
2268621a7e3SLeon Romanovsky 	ib_get_device_fw_str(device, fw);
2275b2cc79dSLeon Romanovsky 	/* A device without FW has strlen(fw) == 0 */
2288621a7e3SLeon Romanovsky 	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
2298621a7e3SLeon Romanovsky 		return -EMSGSIZE;
2308621a7e3SLeon Romanovsky 
2311aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
23225a0ad85SSteve Wise 			      be64_to_cpu(device->node_guid),
23325a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2341aaff896SLeon Romanovsky 		return -EMSGSIZE;
2351aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
23625a0ad85SSteve Wise 			      be64_to_cpu(device->attrs.sys_image_guid),
23725a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2381aaff896SLeon Romanovsky 		return -EMSGSIZE;
2391bb77b8cSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
2401bb77b8cSLeon Romanovsky 		return -EMSGSIZE;
2419e886b39SLeon Romanovsky 
2429e886b39SLeon Romanovsky 	/*
2439e886b39SLeon Romanovsky 	 * The link type is determined from the first port. An mlx4 device,
2449e886b39SLeon Romanovsky 	 * which can potentially expose two different link types on the same
2459e886b39SLeon Romanovsky 	 * IB device, is a case that is better avoided in the future.
2469e886b39SLeon Romanovsky 	 */
2479e886b39SLeon Romanovsky 	port = rdma_start_port(device);
2489e886b39SLeon Romanovsky 	if (rdma_cap_opa_mad(device, port))
2499e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
2509e886b39SLeon Romanovsky 	else if (rdma_protocol_ib(device, port))
2519e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
2529e886b39SLeon Romanovsky 	else if (rdma_protocol_iwarp(device, port))
2539e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
2549e886b39SLeon Romanovsky 	else if (rdma_protocol_roce(device, port))
2559e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
2569e886b39SLeon Romanovsky 	else if (rdma_protocol_usnic(device, port))
2579e886b39SLeon Romanovsky 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
2589e886b39SLeon Romanovsky 				     "usnic");
2599e886b39SLeon Romanovsky 	return ret;
260b4c598a6SLeon Romanovsky }
261b4c598a6SLeon Romanovsky 
2627d02f605SLeon Romanovsky static int fill_port_info(struct sk_buff *msg,
2635b2cc79dSLeon Romanovsky 			  struct ib_device *device, u32 port,
2645b2cc79dSLeon Romanovsky 			  const struct net *net)
2657d02f605SLeon Romanovsky {
2665b2cc79dSLeon Romanovsky 	struct net_device *netdev = NULL;
267ac505253SLeon Romanovsky 	struct ib_port_attr attr;
268ac505253SLeon Romanovsky 	int ret;
2694fa2813dSMichael Guralnik 	u64 cap_flags = 0;
270ac505253SLeon Romanovsky 
271c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
2727d02f605SLeon Romanovsky 		return -EMSGSIZE;
273c2409810SLeon Romanovsky 
2747d02f605SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
2757d02f605SLeon Romanovsky 		return -EMSGSIZE;
276ac505253SLeon Romanovsky 
277ac505253SLeon Romanovsky 	ret = ib_query_port(device, port, &attr);
278ac505253SLeon Romanovsky 	if (ret)
279ac505253SLeon Romanovsky 		return ret;
280ac505253SLeon Romanovsky 
281dd8028f1SLeon Romanovsky 	if (rdma_protocol_ib(device, port)) {
2824fa2813dSMichael Guralnik 		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
2834fa2813dSMichael Guralnik 				sizeof(attr.port_cap_flags2)) > sizeof(u64));
2844fa2813dSMichael Guralnik 		cap_flags = attr.port_cap_flags |
2854fa2813dSMichael Guralnik 			((u64)attr.port_cap_flags2 << 32);
286ac505253SLeon Romanovsky 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
2874fa2813dSMichael Guralnik 				      cap_flags, RDMA_NLDEV_ATTR_PAD))
288ac505253SLeon Romanovsky 			return -EMSGSIZE;
289dd8028f1SLeon Romanovsky 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
29025a0ad85SSteve Wise 				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
29112026fbbSLeon Romanovsky 			return -EMSGSIZE;
29280a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
29380a06dd3SLeon Romanovsky 			return -EMSGSIZE;
29480a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
29580a06dd3SLeon Romanovsky 			return -EMSGSIZE;
29634840feaSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
29734840feaSLeon Romanovsky 			return -EMSGSIZE;
29880a06dd3SLeon Romanovsky 	}
2995654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
3005654e49dSLeon Romanovsky 		return -EMSGSIZE;
3015654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
3025654e49dSLeon Romanovsky 		return -EMSGSIZE;
3035b2cc79dSLeon Romanovsky 
304c2261dd7SJason Gunthorpe 	netdev = ib_device_get_netdev(device, port);
3055b2cc79dSLeon Romanovsky 	if (netdev && net_eq(dev_net(netdev), net)) {
3065b2cc79dSLeon Romanovsky 		ret = nla_put_u32(msg,
3075b2cc79dSLeon Romanovsky 				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
3085b2cc79dSLeon Romanovsky 		if (ret)
3095b2cc79dSLeon Romanovsky 			goto out;
3105b2cc79dSLeon Romanovsky 		ret = nla_put_string(msg,
3115b2cc79dSLeon Romanovsky 				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
3125b2cc79dSLeon Romanovsky 	}
3135b2cc79dSLeon Romanovsky 
3145b2cc79dSLeon Romanovsky out:
3155b2cc79dSLeon Romanovsky 	if (netdev)
3165b2cc79dSLeon Romanovsky 		dev_put(netdev);
3175b2cc79dSLeon Romanovsky 	return ret;
3187d02f605SLeon Romanovsky }
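
/*
 * Note on the packed capability attribute above: port_cap_flags occupies
 * the low 32 bits and port_cap_flags2 the high 32 bits, so a consumer of
 * RDMA_NLDEV_ATTR_CAP_FLAGS can split them back apart as, for example:
 *
 *	u32 cap  = (u32)cap_flags;
 *	u32 cap2 = (u32)(cap_flags >> 32);
 */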
3197d02f605SLeon Romanovsky 
320bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
321bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
322bf3c5a93SLeon Romanovsky {
323bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
324bf3c5a93SLeon Romanovsky 
325ae0be8deSMichal Kubecek 	entry_attr = nla_nest_start_noflag(msg,
326ae0be8deSMichal Kubecek 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
327bf3c5a93SLeon Romanovsky 	if (!entry_attr)
328bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
329bf3c5a93SLeon Romanovsky 
330bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
331bf3c5a93SLeon Romanovsky 		goto err;
33225a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
33325a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
334bf3c5a93SLeon Romanovsky 		goto err;
335bf3c5a93SLeon Romanovsky 
336bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
337bf3c5a93SLeon Romanovsky 	return 0;
338bf3c5a93SLeon Romanovsky 
339bf3c5a93SLeon Romanovsky err:
340bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
341bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
342bf3c5a93SLeon Romanovsky }
343bf3c5a93SLeon Romanovsky 
344bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
345bf3c5a93SLeon Romanovsky {
346bf3c5a93SLeon Romanovsky 	static const char * const names[RDMA_RESTRACK_MAX] = {
347bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_PD] = "pd",
348bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_CQ] = "cq",
349bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_QP] = "qp",
35000313983SSteve Wise 		[RDMA_RESTRACK_CM_ID] = "cm_id",
351fccec5b8SSteve Wise 		[RDMA_RESTRACK_MR] = "mr",
352ffd321e4SLeon Romanovsky 		[RDMA_RESTRACK_CTX] = "ctx",
353bf3c5a93SLeon Romanovsky 	};
354bf3c5a93SLeon Romanovsky 
355bf3c5a93SLeon Romanovsky 	struct nlattr *table_attr;
356bf3c5a93SLeon Romanovsky 	int ret, i, curr;
357bf3c5a93SLeon Romanovsky 
358bf3c5a93SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
359bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
360bf3c5a93SLeon Romanovsky 
361ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
362bf3c5a93SLeon Romanovsky 	if (!table_attr)
363bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
364bf3c5a93SLeon Romanovsky 
365bf3c5a93SLeon Romanovsky 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
366bf3c5a93SLeon Romanovsky 		if (!names[i])
367bf3c5a93SLeon Romanovsky 			continue;
3680ad699c0SLeon Romanovsky 		curr = rdma_restrack_count(device, i,
3690ad699c0SLeon Romanovsky 					   task_active_pid_ns(current));
370bf3c5a93SLeon Romanovsky 		ret = fill_res_info_entry(msg, names[i], curr);
371bf3c5a93SLeon Romanovsky 		if (ret)
372bf3c5a93SLeon Romanovsky 			goto err;
373bf3c5a93SLeon Romanovsky 	}
374bf3c5a93SLeon Romanovsky 
375bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, table_attr);
376bf3c5a93SLeon Romanovsky 	return 0;
377bf3c5a93SLeon Romanovsky 
378bf3c5a93SLeon Romanovsky err:
379bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, table_attr);
380bf3c5a93SLeon Romanovsky 	return ret;
381bf3c5a93SLeon Romanovsky }
382bf3c5a93SLeon Romanovsky 
38300313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
38400313983SSteve Wise 			     struct rdma_restrack_entry *res)
38500313983SSteve Wise {
38600313983SSteve Wise 	/*
38700313983SSteve Wise 	 * For user resources, the user should read /proc/PID/comm to get
38800313983SSteve Wise 	 * the name of the task.
38900313983SSteve Wise 	 */
39000313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
39100313983SSteve Wise 		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
39200313983SSteve Wise 		    res->kern_name))
39300313983SSteve Wise 			return -EMSGSIZE;
39400313983SSteve Wise 	} else {
39500313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
39600313983SSteve Wise 		    task_pid_vnr(res->task)))
39700313983SSteve Wise 			return -EMSGSIZE;
39800313983SSteve Wise 	}
39900313983SSteve Wise 	return 0;
40000313983SSteve Wise }
40100313983SSteve Wise 
40202da3750SLeon Romanovsky static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
40302da3750SLeon Romanovsky 			   struct rdma_restrack_entry *res)
40402da3750SLeon Romanovsky {
40502da3750SLeon Romanovsky 	if (!dev->ops.fill_res_entry)
40602da3750SLeon Romanovsky 		return false;
40702da3750SLeon Romanovsky 	return dev->ops.fill_res_entry(msg, res);
40802da3750SLeon Romanovsky }
40902da3750SLeon Romanovsky 
410659067b0SLeon Romanovsky static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
411d12ff624SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
412b5fa635aSLeon Romanovsky {
413d12ff624SSteve Wise 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
41402da3750SLeon Romanovsky 	struct ib_device *dev = qp->device;
415b5fa635aSLeon Romanovsky 	struct ib_qp_init_attr qp_init_attr;
416b5fa635aSLeon Romanovsky 	struct ib_qp_attr qp_attr;
417b5fa635aSLeon Romanovsky 	int ret;
418b5fa635aSLeon Romanovsky 
419b5fa635aSLeon Romanovsky 	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
420b5fa635aSLeon Romanovsky 	if (ret)
421b5fa635aSLeon Romanovsky 		return ret;
422b5fa635aSLeon Romanovsky 
423b5fa635aSLeon Romanovsky 	if (port && port != qp_attr.port_num)
424c5dfe0eaSLeon Romanovsky 		return -EAGAIN;
425b5fa635aSLeon Romanovsky 
426b5fa635aSLeon Romanovsky 	/* In create_qp() port is not set yet */
427b5fa635aSLeon Romanovsky 	if (qp_attr.port_num &&
428b5fa635aSLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
429b5fa635aSLeon Romanovsky 		goto err;
430b5fa635aSLeon Romanovsky 
431b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
432b5fa635aSLeon Romanovsky 		goto err;
433b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
434b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
435b5fa635aSLeon Romanovsky 				qp_attr.dest_qp_num))
436b5fa635aSLeon Romanovsky 			goto err;
437b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
438b5fa635aSLeon Romanovsky 				qp_attr.rq_psn))
439b5fa635aSLeon Romanovsky 			goto err;
440b5fa635aSLeon Romanovsky 	}
441b5fa635aSLeon Romanovsky 
442b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
443b5fa635aSLeon Romanovsky 		goto err;
444b5fa635aSLeon Romanovsky 
445b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
446b5fa635aSLeon Romanovsky 	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
447b5fa635aSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
448b5fa635aSLeon Romanovsky 			       qp_attr.path_mig_state))
449b5fa635aSLeon Romanovsky 			goto err;
450b5fa635aSLeon Romanovsky 	}
451b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
452b5fa635aSLeon Romanovsky 		goto err;
453b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
454b5fa635aSLeon Romanovsky 		goto err;
455b5fa635aSLeon Romanovsky 
456c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
457c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
458c3d02788SLeon Romanovsky 		goto err;
459c3d02788SLeon Romanovsky 
46000313983SSteve Wise 	if (fill_res_name_pid(msg, res))
461b5fa635aSLeon Romanovsky 		goto err;
46200313983SSteve Wise 
46302da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
464da5c8507SSteve Wise 		goto err;
465da5c8507SSteve Wise 
46600313983SSteve Wise 	return 0;
46700313983SSteve Wise 
468c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
46900313983SSteve Wise }
47000313983SSteve Wise 
471659067b0SLeon Romanovsky static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
47200313983SSteve Wise 				struct rdma_restrack_entry *res, uint32_t port)
47300313983SSteve Wise {
47400313983SSteve Wise 	struct rdma_id_private *id_priv =
47500313983SSteve Wise 				container_of(res, struct rdma_id_private, res);
47602da3750SLeon Romanovsky 	struct ib_device *dev = id_priv->id.device;
47700313983SSteve Wise 	struct rdma_cm_id *cm_id = &id_priv->id;
47800313983SSteve Wise 
47900313983SSteve Wise 	if (port && port != cm_id->port_num)
48000313983SSteve Wise 		return 0;
48100313983SSteve Wise 
48200313983SSteve Wise 	if (cm_id->port_num &&
48300313983SSteve Wise 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
48400313983SSteve Wise 		goto err;
48500313983SSteve Wise 
48600313983SSteve Wise 	if (id_priv->qp_num) {
48700313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
48800313983SSteve Wise 			goto err;
48900313983SSteve Wise 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
490b5fa635aSLeon Romanovsky 			goto err;
491b5fa635aSLeon Romanovsky 	}
492b5fa635aSLeon Romanovsky 
49300313983SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
49400313983SSteve Wise 		goto err;
49500313983SSteve Wise 
49600313983SSteve Wise 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
49700313983SSteve Wise 		goto err;
49800313983SSteve Wise 
49900313983SSteve Wise 	if (cm_id->route.addr.src_addr.ss_family &&
50000313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
50100313983SSteve Wise 		    sizeof(cm_id->route.addr.src_addr),
50200313983SSteve Wise 		    &cm_id->route.addr.src_addr))
50300313983SSteve Wise 		goto err;
50400313983SSteve Wise 	if (cm_id->route.addr.dst_addr.ss_family &&
50500313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
50600313983SSteve Wise 		    sizeof(cm_id->route.addr.dst_addr),
50700313983SSteve Wise 		    &cm_id->route.addr.dst_addr))
50800313983SSteve Wise 		goto err;
50900313983SSteve Wise 
510517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
511517b773eSLeon Romanovsky 		goto err;
512517b773eSLeon Romanovsky 
51300313983SSteve Wise 	if (fill_res_name_pid(msg, res))
51400313983SSteve Wise 		goto err;
51500313983SSteve Wise 
51602da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
517da5c8507SSteve Wise 		goto err;
518da5c8507SSteve Wise 
519b5fa635aSLeon Romanovsky 	return 0;
520b5fa635aSLeon Romanovsky 
521c5dfe0eaSLeon Romanovsky err: return -EMSGSIZE;
522b5fa635aSLeon Romanovsky }
523b5fa635aSLeon Romanovsky 
524659067b0SLeon Romanovsky static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
525a34fc089SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
526a34fc089SSteve Wise {
527a34fc089SSteve Wise 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
52802da3750SLeon Romanovsky 	struct ib_device *dev = cq->device;
529a34fc089SSteve Wise 
530a34fc089SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
531a34fc089SSteve Wise 		goto err;
532a34fc089SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
53325a0ad85SSteve Wise 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
534a34fc089SSteve Wise 		goto err;
535a34fc089SSteve Wise 
536a34fc089SSteve Wise 	/* Poll context is only valid for kernel CQs */
537a34fc089SSteve Wise 	if (rdma_is_kernel_res(res) &&
538a34fc089SSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
539a34fc089SSteve Wise 		goto err;
540a34fc089SSteve Wise 
541517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
542517b773eSLeon Romanovsky 		goto err;
543c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
544c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
545c3d02788SLeon Romanovsky 			cq->uobject->context->res.id))
546c3d02788SLeon Romanovsky 		goto err;
547517b773eSLeon Romanovsky 
548a34fc089SSteve Wise 	if (fill_res_name_pid(msg, res))
549a34fc089SSteve Wise 		goto err;
550a34fc089SSteve Wise 
55102da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
552da5c8507SSteve Wise 		goto err;
553da5c8507SSteve Wise 
554a34fc089SSteve Wise 	return 0;
555a34fc089SSteve Wise 
556c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
557a34fc089SSteve Wise }
558a34fc089SSteve Wise 
559659067b0SLeon Romanovsky static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
560fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
561fccec5b8SSteve Wise {
562fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
56302da3750SLeon Romanovsky 	struct ib_device *dev = mr->pd->device;
564fccec5b8SSteve Wise 
565659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
566fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
567fccec5b8SSteve Wise 			goto err;
568fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
569fccec5b8SSteve Wise 			goto err;
570fccec5b8SSteve Wise 	}
571fccec5b8SSteve Wise 
57225a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
57325a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
574fccec5b8SSteve Wise 		goto err;
575fccec5b8SSteve Wise 
576517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
577517b773eSLeon Romanovsky 		goto err;
578517b773eSLeon Romanovsky 
579c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
580c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
581c3d02788SLeon Romanovsky 		goto err;
582c3d02788SLeon Romanovsky 
583fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
584fccec5b8SSteve Wise 		goto err;
585fccec5b8SSteve Wise 
58602da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
587da5c8507SSteve Wise 		goto err;
588da5c8507SSteve Wise 
589fccec5b8SSteve Wise 	return 0;
590fccec5b8SSteve Wise 
591c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
592fccec5b8SSteve Wise }
593fccec5b8SSteve Wise 
594659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
59529cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
59629cf1351SSteve Wise {
59729cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
59802da3750SLeon Romanovsky 	struct ib_device *dev = pd->device;
59929cf1351SSteve Wise 
600659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
60129cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
60229cf1351SSteve Wise 				pd->local_dma_lkey))
60329cf1351SSteve Wise 			goto err;
60429cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
60529cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
60629cf1351SSteve Wise 				pd->unsafe_global_rkey))
60729cf1351SSteve Wise 			goto err;
60829cf1351SSteve Wise 	}
60929cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
61025a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
61129cf1351SSteve Wise 		goto err;
61229cf1351SSteve Wise 
613517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
614517b773eSLeon Romanovsky 		goto err;
615517b773eSLeon Romanovsky 
616c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
617c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
618c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
619c3d02788SLeon Romanovsky 		goto err;
620c3d02788SLeon Romanovsky 
62129cf1351SSteve Wise 	if (fill_res_name_pid(msg, res))
62229cf1351SSteve Wise 		goto err;
62329cf1351SSteve Wise 
62402da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
625da5c8507SSteve Wise 		goto err;
626da5c8507SSteve Wise 
62729cf1351SSteve Wise 	return 0;
62829cf1351SSteve Wise 
629c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
63029cf1351SSteve Wise }
63129cf1351SSteve Wise 
632e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
633e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
634e5c9469eSLeon Romanovsky {
635e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
636e5c9469eSLeon Romanovsky 	struct ib_device *device;
637e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
638e5c9469eSLeon Romanovsky 	u32 index;
639e5c9469eSLeon Romanovsky 	int err;
640e5c9469eSLeon Romanovsky 
6418cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
642e5c9469eSLeon Romanovsky 				     nldev_policy, extack);
643e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
644e5c9469eSLeon Romanovsky 		return -EINVAL;
645e5c9469eSLeon Romanovsky 
646e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
647e5c9469eSLeon Romanovsky 
64837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
649e5c9469eSLeon Romanovsky 	if (!device)
650e5c9469eSLeon Romanovsky 		return -EINVAL;
651e5c9469eSLeon Romanovsky 
652e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
653f8978bd9SLeon Romanovsky 	if (!msg) {
654f8978bd9SLeon Romanovsky 		err = -ENOMEM;
655f8978bd9SLeon Romanovsky 		goto err;
656f8978bd9SLeon Romanovsky 	}
657e5c9469eSLeon Romanovsky 
658e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
659e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
660e5c9469eSLeon Romanovsky 			0, 0);
661e5c9469eSLeon Romanovsky 
662e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
663f8978bd9SLeon Romanovsky 	if (err)
664f8978bd9SLeon Romanovsky 		goto err_free;
665e5c9469eSLeon Romanovsky 
666e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
667e5c9469eSLeon Romanovsky 
66801b67117SParav Pandit 	ib_device_put(device);
669e5c9469eSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
670f8978bd9SLeon Romanovsky 
671f8978bd9SLeon Romanovsky err_free:
672f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
673f8978bd9SLeon Romanovsky err:
67401b67117SParav Pandit 	ib_device_put(device);
675f8978bd9SLeon Romanovsky 	return err;
676e5c9469eSLeon Romanovsky }
677e5c9469eSLeon Romanovsky 
67805d940d3SLeon Romanovsky static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
67905d940d3SLeon Romanovsky 			  struct netlink_ext_ack *extack)
68005d940d3SLeon Romanovsky {
68105d940d3SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
68205d940d3SLeon Romanovsky 	struct ib_device *device;
68305d940d3SLeon Romanovsky 	u32 index;
68405d940d3SLeon Romanovsky 	int err;
68505d940d3SLeon Romanovsky 
6868cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
6878cb08174SJohannes Berg 				     nldev_policy, extack);
68805d940d3SLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
68905d940d3SLeon Romanovsky 		return -EINVAL;
69005d940d3SLeon Romanovsky 
69105d940d3SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
69237eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
69305d940d3SLeon Romanovsky 	if (!device)
69405d940d3SLeon Romanovsky 		return -EINVAL;
69505d940d3SLeon Romanovsky 
69605d940d3SLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
69705d940d3SLeon Romanovsky 		char name[IB_DEVICE_NAME_MAX] = {};
69805d940d3SLeon Romanovsky 
69905d940d3SLeon Romanovsky 		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
70005d940d3SLeon Romanovsky 			    IB_DEVICE_NAME_MAX);
70105d940d3SLeon Romanovsky 		err = ib_device_rename(device, name);
7022e5b8a01SParav Pandit 		goto done;
70305d940d3SLeon Romanovsky 	}
70405d940d3SLeon Romanovsky 
7052e5b8a01SParav Pandit 	if (tb[RDMA_NLDEV_NET_NS_FD]) {
7062e5b8a01SParav Pandit 		u32 ns_fd;
7072e5b8a01SParav Pandit 
7082e5b8a01SParav Pandit 		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
7092e5b8a01SParav Pandit 		err = ib_device_set_netns_put(skb, device, ns_fd);
7102e5b8a01SParav Pandit 		goto put_done;
7112e5b8a01SParav Pandit 	}
7122e5b8a01SParav Pandit 
7132e5b8a01SParav Pandit done:
71401b67117SParav Pandit 	ib_device_put(device);
7152e5b8a01SParav Pandit put_done:
71605d940d3SLeon Romanovsky 	return err;
71705d940d3SLeon Romanovsky }
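
/*
 * Usage note (assumption about typical tooling, not part of this file):
 * the handlers above are normally exercised from user space through
 * iproute2's "rdma" utility, for example:
 *
 *	$ rdma dev show                    # RDMA_NLDEV_CMD_GET
 *	$ rdma dev set mlx5_0 name rdma0   # device rename (nldev_set_doit)
 *
 * "mlx5_0" and "rdma0" are placeholder device names.
 */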
71805d940d3SLeon Romanovsky 
719b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
720b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
721b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
722b4c598a6SLeon Romanovsky 			     unsigned int idx)
723b4c598a6SLeon Romanovsky {
724b4c598a6SLeon Romanovsky 	int start = cb->args[0];
725b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
726b4c598a6SLeon Romanovsky 
727b4c598a6SLeon Romanovsky 	if (idx < start)
728b4c598a6SLeon Romanovsky 		return 0;
729b4c598a6SLeon Romanovsky 
730b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
731b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
732b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
733b4c598a6SLeon Romanovsky 
734b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
735b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
736b4c598a6SLeon Romanovsky 		goto out;
737b4c598a6SLeon Romanovsky 	}
738b4c598a6SLeon Romanovsky 
739b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
740b4c598a6SLeon Romanovsky 
741b4c598a6SLeon Romanovsky 	idx++;
742b4c598a6SLeon Romanovsky 
743b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
744b4c598a6SLeon Romanovsky 	return skb->len;
745b4c598a6SLeon Romanovsky }
746b4c598a6SLeon Romanovsky 
747b4c598a6SLeon Romanovsky static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
748b4c598a6SLeon Romanovsky {
749b4c598a6SLeon Romanovsky 	/*
750b4c598a6SLeon Romanovsky 	 * There is no need to take a lock, because
75137eeab55SParav Pandit 	 * we rely on ib_core's locking.
752b4c598a6SLeon Romanovsky 	 */
753b4c598a6SLeon Romanovsky 	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
754b4c598a6SLeon Romanovsky }
755b4c598a6SLeon Romanovsky 
756c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
757c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
758c3f66f7bSLeon Romanovsky {
759c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
760c3f66f7bSLeon Romanovsky 	struct ib_device *device;
761c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
762c3f66f7bSLeon Romanovsky 	u32 index;
763c3f66f7bSLeon Romanovsky 	u32 port;
764c3f66f7bSLeon Romanovsky 	int err;
765c3f66f7bSLeon Romanovsky 
7668cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
767c3f66f7bSLeon Romanovsky 				     nldev_policy, extack);
768287683d0SLeon Romanovsky 	if (err ||
769287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
770287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
771c3f66f7bSLeon Romanovsky 		return -EINVAL;
772c3f66f7bSLeon Romanovsky 
773c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
77437eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
775c3f66f7bSLeon Romanovsky 	if (!device)
776c3f66f7bSLeon Romanovsky 		return -EINVAL;
777c3f66f7bSLeon Romanovsky 
778c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
779f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
780f8978bd9SLeon Romanovsky 		err = -EINVAL;
781f8978bd9SLeon Romanovsky 		goto err;
782f8978bd9SLeon Romanovsky 	}
783c3f66f7bSLeon Romanovsky 
784c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
785f8978bd9SLeon Romanovsky 	if (!msg) {
786f8978bd9SLeon Romanovsky 		err = -ENOMEM;
787f8978bd9SLeon Romanovsky 		goto err;
788f8978bd9SLeon Romanovsky 	}
789c3f66f7bSLeon Romanovsky 
790c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
791c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
792c3f66f7bSLeon Romanovsky 			0, 0);
793c3f66f7bSLeon Romanovsky 
7945b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
795f8978bd9SLeon Romanovsky 	if (err)
796f8978bd9SLeon Romanovsky 		goto err_free;
797c3f66f7bSLeon Romanovsky 
798c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
79901b67117SParav Pandit 	ib_device_put(device);
800c3f66f7bSLeon Romanovsky 
801c3f66f7bSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
802f8978bd9SLeon Romanovsky 
803f8978bd9SLeon Romanovsky err_free:
804f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
805f8978bd9SLeon Romanovsky err:
80601b67117SParav Pandit 	ib_device_put(device);
807f8978bd9SLeon Romanovsky 	return err;
808c3f66f7bSLeon Romanovsky }
809c3f66f7bSLeon Romanovsky 
8107d02f605SLeon Romanovsky static int nldev_port_get_dumpit(struct sk_buff *skb,
8117d02f605SLeon Romanovsky 				 struct netlink_callback *cb)
8127d02f605SLeon Romanovsky {
8137d02f605SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
8147d02f605SLeon Romanovsky 	struct ib_device *device;
8157d02f605SLeon Romanovsky 	int start = cb->args[0];
8167d02f605SLeon Romanovsky 	struct nlmsghdr *nlh;
8177d02f605SLeon Romanovsky 	u32 idx = 0;
8187d02f605SLeon Romanovsky 	u32 ifindex;
8197d02f605SLeon Romanovsky 	int err;
820ea1075edSJason Gunthorpe 	unsigned int p;
8217d02f605SLeon Romanovsky 
8228cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
8237d02f605SLeon Romanovsky 				     nldev_policy, NULL);
8247d02f605SLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
8257d02f605SLeon Romanovsky 		return -EINVAL;
8267d02f605SLeon Romanovsky 
8277d02f605SLeon Romanovsky 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
82837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
8297d02f605SLeon Romanovsky 	if (!device)
8307d02f605SLeon Romanovsky 		return -EINVAL;
8317d02f605SLeon Romanovsky 
832ea1075edSJason Gunthorpe 	rdma_for_each_port (device, p) {
8337d02f605SLeon Romanovsky 		/*
8347d02f605SLeon Romanovsky 		 * The dumpit function returns all information starting
8357d02f605SLeon Romanovsky 		 * from a specific index. That index is taken from the
8367d02f605SLeon Romanovsky 		 * netlink request sent by the user and is available in
8377d02f605SLeon Romanovsky 		 * cb->args[0].
8387d02f605SLeon Romanovsky 		 *
8397d02f605SLeon Romanovsky 		 * Usually, the user doesn't fill this field, which causes
8407d02f605SLeon Romanovsky 		 * everything to be returned.
8417d02f605SLeon Romanovsky 		 *
8427d02f605SLeon Romanovsky 		 */
8437d02f605SLeon Romanovsky 		if (idx < start) {
8447d02f605SLeon Romanovsky 			idx++;
8457d02f605SLeon Romanovsky 			continue;
8467d02f605SLeon Romanovsky 		}
8477d02f605SLeon Romanovsky 
8487d02f605SLeon Romanovsky 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
8497d02f605SLeon Romanovsky 				cb->nlh->nlmsg_seq,
8507d02f605SLeon Romanovsky 				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
8517d02f605SLeon Romanovsky 						 RDMA_NLDEV_CMD_PORT_GET),
8527d02f605SLeon Romanovsky 				0, NLM_F_MULTI);
8537d02f605SLeon Romanovsky 
8545b2cc79dSLeon Romanovsky 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
8557d02f605SLeon Romanovsky 			nlmsg_cancel(skb, nlh);
8567d02f605SLeon Romanovsky 			goto out;
8577d02f605SLeon Romanovsky 		}
8587d02f605SLeon Romanovsky 		idx++;
8597d02f605SLeon Romanovsky 		nlmsg_end(skb, nlh);
8607d02f605SLeon Romanovsky 	}
8617d02f605SLeon Romanovsky 
862f8978bd9SLeon Romanovsky out:
86301b67117SParav Pandit 	ib_device_put(device);
864f8978bd9SLeon Romanovsky 	cb->args[0] = idx;
8657d02f605SLeon Romanovsky 	return skb->len;
8667d02f605SLeon Romanovsky }
8677d02f605SLeon Romanovsky 
868bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
869bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
870bf3c5a93SLeon Romanovsky {
871bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
872bf3c5a93SLeon Romanovsky 	struct ib_device *device;
873bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
874bf3c5a93SLeon Romanovsky 	u32 index;
875bf3c5a93SLeon Romanovsky 	int ret;
876bf3c5a93SLeon Romanovsky 
8778cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
878bf3c5a93SLeon Romanovsky 				     nldev_policy, extack);
879bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
880bf3c5a93SLeon Romanovsky 		return -EINVAL;
881bf3c5a93SLeon Romanovsky 
882bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
88337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
884bf3c5a93SLeon Romanovsky 	if (!device)
885bf3c5a93SLeon Romanovsky 		return -EINVAL;
886bf3c5a93SLeon Romanovsky 
887bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
888f34727a1SDan Carpenter 	if (!msg) {
889f34727a1SDan Carpenter 		ret = -ENOMEM;
890bf3c5a93SLeon Romanovsky 		goto err;
891f34727a1SDan Carpenter 	}
892bf3c5a93SLeon Romanovsky 
893bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
894bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
895bf3c5a93SLeon Romanovsky 			0, 0);
896bf3c5a93SLeon Romanovsky 
897bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
898bf3c5a93SLeon Romanovsky 	if (ret)
899bf3c5a93SLeon Romanovsky 		goto err_free;
900bf3c5a93SLeon Romanovsky 
901bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
90201b67117SParav Pandit 	ib_device_put(device);
903bf3c5a93SLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
904bf3c5a93SLeon Romanovsky 
905bf3c5a93SLeon Romanovsky err_free:
906bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
907bf3c5a93SLeon Romanovsky err:
90801b67117SParav Pandit 	ib_device_put(device);
909bf3c5a93SLeon Romanovsky 	return ret;
910bf3c5a93SLeon Romanovsky }
911bf3c5a93SLeon Romanovsky 
912bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
913bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
914bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
915bf3c5a93SLeon Romanovsky 				 unsigned int idx)
916bf3c5a93SLeon Romanovsky {
917bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
918bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
919bf3c5a93SLeon Romanovsky 
920bf3c5a93SLeon Romanovsky 	if (idx < start)
921bf3c5a93SLeon Romanovsky 		return 0;
922bf3c5a93SLeon Romanovsky 
923bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
924bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
925bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
926bf3c5a93SLeon Romanovsky 
927bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
928bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
929bf3c5a93SLeon Romanovsky 		goto out;
930bf3c5a93SLeon Romanovsky 	}
931bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
932bf3c5a93SLeon Romanovsky 
933bf3c5a93SLeon Romanovsky 	idx++;
934bf3c5a93SLeon Romanovsky 
935bf3c5a93SLeon Romanovsky out:
936bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
937bf3c5a93SLeon Romanovsky 	return skb->len;
938bf3c5a93SLeon Romanovsky }
939bf3c5a93SLeon Romanovsky 
940bf3c5a93SLeon Romanovsky static int nldev_res_get_dumpit(struct sk_buff *skb,
941bf3c5a93SLeon Romanovsky 				struct netlink_callback *cb)
942bf3c5a93SLeon Romanovsky {
943bf3c5a93SLeon Romanovsky 	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
944bf3c5a93SLeon Romanovsky }
945bf3c5a93SLeon Romanovsky 
946d12ff624SSteve Wise struct nldev_fill_res_entry {
947659067b0SLeon Romanovsky 	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
948d12ff624SSteve Wise 			     struct rdma_restrack_entry *res, u32 port);
949d12ff624SSteve Wise 	enum rdma_nldev_attr nldev_attr;
950d12ff624SSteve Wise 	enum rdma_nldev_command nldev_cmd;
951c5dfe0eaSLeon Romanovsky 	u8 flags;
952c5dfe0eaSLeon Romanovsky 	u32 entry;
953c5dfe0eaSLeon Romanovsky 	u32 id;
954c5dfe0eaSLeon Romanovsky };
955c5dfe0eaSLeon Romanovsky 
956c5dfe0eaSLeon Romanovsky enum nldev_res_flags {
957c5dfe0eaSLeon Romanovsky 	NLDEV_PER_DEV = 1 << 0,
958d12ff624SSteve Wise };
959d12ff624SSteve Wise 
960d12ff624SSteve Wise static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
961d12ff624SSteve Wise 	[RDMA_RESTRACK_QP] = {
962d12ff624SSteve Wise 		.fill_res_func = fill_res_qp_entry,
963d12ff624SSteve Wise 		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
964d12ff624SSteve Wise 		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
965c5dfe0eaSLeon Romanovsky 		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
9661b8b7788SLeon Romanovsky 		.id = RDMA_NLDEV_ATTR_RES_LQPN,
967d12ff624SSteve Wise 	},
96800313983SSteve Wise 	[RDMA_RESTRACK_CM_ID] = {
96900313983SSteve Wise 		.fill_res_func = fill_res_cm_id_entry,
97000313983SSteve Wise 		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
97100313983SSteve Wise 		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
972c5dfe0eaSLeon Romanovsky 		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
973517b773eSLeon Romanovsky 		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
97400313983SSteve Wise 	},
975a34fc089SSteve Wise 	[RDMA_RESTRACK_CQ] = {
976a34fc089SSteve Wise 		.fill_res_func = fill_res_cq_entry,
977a34fc089SSteve Wise 		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
978a34fc089SSteve Wise 		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
979c5dfe0eaSLeon Romanovsky 		.flags = NLDEV_PER_DEV,
980c5dfe0eaSLeon Romanovsky 		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
981517b773eSLeon Romanovsky 		.id = RDMA_NLDEV_ATTR_RES_CQN,
982a34fc089SSteve Wise 	},
983fccec5b8SSteve Wise 	[RDMA_RESTRACK_MR] = {
984fccec5b8SSteve Wise 		.fill_res_func = fill_res_mr_entry,
985fccec5b8SSteve Wise 		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
986fccec5b8SSteve Wise 		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
987c5dfe0eaSLeon Romanovsky 		.flags = NLDEV_PER_DEV,
988c5dfe0eaSLeon Romanovsky 		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
989517b773eSLeon Romanovsky 		.id = RDMA_NLDEV_ATTR_RES_MRN,
990fccec5b8SSteve Wise 	},
99129cf1351SSteve Wise 	[RDMA_RESTRACK_PD] = {
99229cf1351SSteve Wise 		.fill_res_func = fill_res_pd_entry,
99329cf1351SSteve Wise 		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
99429cf1351SSteve Wise 		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
995c5dfe0eaSLeon Romanovsky 		.flags = NLDEV_PER_DEV,
996c5dfe0eaSLeon Romanovsky 		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
997517b773eSLeon Romanovsky 		.id = RDMA_NLDEV_ATTR_RES_PDN,
99829cf1351SSteve Wise 	},
999d12ff624SSteve Wise };
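
/*
 * A minimal sketch (assumed shape of code later in this file, outside
 * this excerpt): per-type netlink handlers are expected to be thin
 * wrappers that pass the matching restrack type from fill_entries[]
 * down to res_get_common_doit()/res_get_common_dumpit() below, e.g.:
 *
 *	static int nldev_res_get_qp_doit(struct sk_buff *skb,
 *					 struct nlmsghdr *nlh,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		return res_get_common_doit(skb, nlh, extack,
 *					   RDMA_RESTRACK_QP);
 *	}
 *
 * The wrapper name is illustrative.
 */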
1000d12ff624SSteve Wise 
10018be565e6SLeon Romanovsky static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res)
10028be565e6SLeon Romanovsky {
10038be565e6SLeon Romanovsky 	/*
10048be565e6SLeon Romanovsky 	 * 1. Kernel resources should be visible in the init namespace only
10058be565e6SLeon Romanovsky 	 * 2. Present only resources visible in the current namespace
10068be565e6SLeon Romanovsky 	 */
10078be565e6SLeon Romanovsky 	if (rdma_is_kernel_res(res))
10088be565e6SLeon Romanovsky 		return task_active_pid_ns(current) == &init_pid_ns;
10098be565e6SLeon Romanovsky 	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
10108be565e6SLeon Romanovsky }
10118be565e6SLeon Romanovsky 
1012c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1013c5dfe0eaSLeon Romanovsky 			       struct netlink_ext_ack *extack,
1014c5dfe0eaSLeon Romanovsky 			       enum rdma_restrack_type res_type)
1015c5dfe0eaSLeon Romanovsky {
1016c5dfe0eaSLeon Romanovsky 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1017c5dfe0eaSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1018c5dfe0eaSLeon Romanovsky 	struct rdma_restrack_entry *res;
1019c5dfe0eaSLeon Romanovsky 	struct ib_device *device;
1020c5dfe0eaSLeon Romanovsky 	u32 index, id, port = 0;
1021c5dfe0eaSLeon Romanovsky 	bool has_cap_net_admin;
1022c5dfe0eaSLeon Romanovsky 	struct sk_buff *msg;
1023c5dfe0eaSLeon Romanovsky 	int ret;
1024c5dfe0eaSLeon Romanovsky 
10258cb08174SJohannes Berg 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1026c5dfe0eaSLeon Romanovsky 				     nldev_policy, extack);
1027c5dfe0eaSLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
1028c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1029c5dfe0eaSLeon Romanovsky 
1030c5dfe0eaSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
103137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1032c5dfe0eaSLeon Romanovsky 	if (!device)
1033c5dfe0eaSLeon Romanovsky 		return -EINVAL;
1034c5dfe0eaSLeon Romanovsky 
1035c5dfe0eaSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1036c5dfe0eaSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1037c5dfe0eaSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1038c5dfe0eaSLeon Romanovsky 			ret = -EINVAL;
1039c5dfe0eaSLeon Romanovsky 			goto err;
1040c5dfe0eaSLeon Romanovsky 		}
1041c5dfe0eaSLeon Romanovsky 	}
1042c5dfe0eaSLeon Romanovsky 
1043c5dfe0eaSLeon Romanovsky 	if ((port && fe->flags & NLDEV_PER_DEV) ||
1044c5dfe0eaSLeon Romanovsky 	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
1045c5dfe0eaSLeon Romanovsky 		ret = -EINVAL;
1046c5dfe0eaSLeon Romanovsky 		goto err;
1047c5dfe0eaSLeon Romanovsky 	}
1048c5dfe0eaSLeon Romanovsky 
1049c5dfe0eaSLeon Romanovsky 	id = nla_get_u32(tb[fe->id]);
1050c5dfe0eaSLeon Romanovsky 	res = rdma_restrack_get_byid(device, res_type, id);
1051c5dfe0eaSLeon Romanovsky 	if (IS_ERR(res)) {
1052c5dfe0eaSLeon Romanovsky 		ret = PTR_ERR(res);
1053c5dfe0eaSLeon Romanovsky 		goto err;
1054c5dfe0eaSLeon Romanovsky 	}
1055c5dfe0eaSLeon Romanovsky 
1056c5dfe0eaSLeon Romanovsky 	if (!is_visible_in_pid_ns(res)) {
1057c5dfe0eaSLeon Romanovsky 		ret = -ENOENT;
1058c5dfe0eaSLeon Romanovsky 		goto err_get;
1059c5dfe0eaSLeon Romanovsky 	}
1060c5dfe0eaSLeon Romanovsky 
1061c5dfe0eaSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1062c5dfe0eaSLeon Romanovsky 	if (!msg) {
1063c5dfe0eaSLeon Romanovsky 		ret = -ENOMEM;
1064c5dfe0eaSLeon Romanovsky 		goto err;
1065c5dfe0eaSLeon Romanovsky 	}
1066c5dfe0eaSLeon Romanovsky 
1067c5dfe0eaSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1068c5dfe0eaSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
1069c5dfe0eaSLeon Romanovsky 			0, 0);
1070c5dfe0eaSLeon Romanovsky 
1071c5dfe0eaSLeon Romanovsky 	if (fill_nldev_handle(msg, device)) {
1072c5dfe0eaSLeon Romanovsky 		ret = -EMSGSIZE;
1073c5dfe0eaSLeon Romanovsky 		goto err_free;
1074c5dfe0eaSLeon Romanovsky 	}
1075c5dfe0eaSLeon Romanovsky 
1076c5dfe0eaSLeon Romanovsky 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1077c5dfe0eaSLeon Romanovsky 	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
1078c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1079c5dfe0eaSLeon Romanovsky 	if (ret)
1080c5dfe0eaSLeon Romanovsky 		goto err_free;
1081c5dfe0eaSLeon Romanovsky 
1082c5dfe0eaSLeon Romanovsky 	nlmsg_end(msg, nlh);
1083c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1084c5dfe0eaSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1085c5dfe0eaSLeon Romanovsky 
1086c5dfe0eaSLeon Romanovsky err_free:
1087c5dfe0eaSLeon Romanovsky 	nlmsg_free(msg);
1088c5dfe0eaSLeon Romanovsky err_get:
1089c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1090c5dfe0eaSLeon Romanovsky err:
1091c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1092c5dfe0eaSLeon Romanovsky 	return ret;
1093c5dfe0eaSLeon Romanovsky }
1094c5dfe0eaSLeon Romanovsky 
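/*
 * On success, res_get_common_doit() unicasts a reply laid out roughly as
 * sketched below (the exact attribute set depends on the resource type and
 * on fe->fill_res_func):
 *
 *	nlmsghdr: RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd)
 *	  device handle attributes from fill_nldev_handle()
 *	  flat per-object attributes from fe->fill_res_func()
 *
 * Unlike the dumpit path below, no table/entry nesting is used for the
 * single object.
 */
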
1095d12ff624SSteve Wise static int res_get_common_dumpit(struct sk_buff *skb,
1096d12ff624SSteve Wise 				 struct netlink_callback *cb,
1097d12ff624SSteve Wise 				 enum rdma_restrack_type res_type)
1098b5fa635aSLeon Romanovsky {
1099d12ff624SSteve Wise 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1100b5fa635aSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1101b5fa635aSLeon Romanovsky 	struct rdma_restrack_entry *res;
11027c77c6a9SLeon Romanovsky 	struct rdma_restrack_root *rt;
1103b5fa635aSLeon Romanovsky 	int err, ret = 0, idx = 0;
1104b5fa635aSLeon Romanovsky 	struct nlattr *table_attr;
1105c5dfe0eaSLeon Romanovsky 	struct nlattr *entry_attr;
1106b5fa635aSLeon Romanovsky 	struct ib_device *device;
1107b5fa635aSLeon Romanovsky 	int start = cb->args[0];
1108659067b0SLeon Romanovsky 	bool has_cap_net_admin;
1109b5fa635aSLeon Romanovsky 	struct nlmsghdr *nlh;
1110fd47c2f9SLeon Romanovsky 	unsigned long id;
1111b5fa635aSLeon Romanovsky 	u32 index, port = 0;
1112d12ff624SSteve Wise 	bool filled = false;
1113b5fa635aSLeon Romanovsky 
11148cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1115b5fa635aSLeon Romanovsky 				     nldev_policy, NULL);
1116b5fa635aSLeon Romanovsky 	/*
1117d12ff624SSteve Wise 	 * Right now we require the device index to look up resource
1118b5fa635aSLeon Romanovsky 	 * information, but this code could be extended to return all devices
1119b5fa635aSLeon Romanovsky 	 * in one shot by checking whether RDMA_NLDEV_ATTR_DEV_INDEX is
1120b5fa635aSLeon Romanovsky 	 * present: if it is absent, iterate over all devices.
1121b5fa635aSLeon Romanovsky 	 *
1122b5fa635aSLeon Romanovsky 	 * That is not needed for now.
1123b5fa635aSLeon Romanovsky 	 */
1124b5fa635aSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1125b5fa635aSLeon Romanovsky 		return -EINVAL;
1126b5fa635aSLeon Romanovsky 
1127b5fa635aSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
112837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1129b5fa635aSLeon Romanovsky 	if (!device)
1130b5fa635aSLeon Romanovsky 		return -EINVAL;
1131b5fa635aSLeon Romanovsky 
1132b5fa635aSLeon Romanovsky 	/*
1133b5fa635aSLeon Romanovsky 	 * If no PORT_INDEX is supplied, return all resources of this type from that device
1134b5fa635aSLeon Romanovsky 	 */
1135b5fa635aSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1136b5fa635aSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1137b5fa635aSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
1138b5fa635aSLeon Romanovsky 			ret = -EINVAL;
1139b5fa635aSLeon Romanovsky 			goto err_index;
1140b5fa635aSLeon Romanovsky 		}
1141b5fa635aSLeon Romanovsky 	}
1142b5fa635aSLeon Romanovsky 
1143b5fa635aSLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1144d12ff624SSteve Wise 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
1145b5fa635aSLeon Romanovsky 			0, NLM_F_MULTI);
1146b5fa635aSLeon Romanovsky 
1147b5fa635aSLeon Romanovsky 	if (fill_nldev_handle(skb, device)) {
1148b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1149b5fa635aSLeon Romanovsky 		goto err;
1150b5fa635aSLeon Romanovsky 	}
1151b5fa635aSLeon Romanovsky 
1152ae0be8deSMichal Kubecek 	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
1153b5fa635aSLeon Romanovsky 	if (!table_attr) {
1154b5fa635aSLeon Romanovsky 		ret = -EMSGSIZE;
1155b5fa635aSLeon Romanovsky 		goto err;
1156b5fa635aSLeon Romanovsky 	}
1157b5fa635aSLeon Romanovsky 
1158659067b0SLeon Romanovsky 	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
1159659067b0SLeon Romanovsky 
11607c77c6a9SLeon Romanovsky 	rt = &device->res[res_type];
11617c77c6a9SLeon Romanovsky 	xa_lock(&rt->xa);
1162fd47c2f9SLeon Romanovsky 	/*
1163fd47c2f9SLeon Romanovsky 	 * FIXME: if skipping ahead turns out to be common, this loop should
1164fd47c2f9SLeon Romanovsky 	 * use xas_for_each & xas_pause to optimize, since we can have a lot
1165fd47c2f9SLeon Romanovsky 	 * of objects.
1166fd47c2f9SLeon Romanovsky 	 */
11677c77c6a9SLeon Romanovsky 	xa_for_each(&rt->xa, id, res) {
11688be565e6SLeon Romanovsky 		if (!is_visible_in_pid_ns(res))
1169f2a0e45fSLeon Romanovsky 			continue;
1170b5fa635aSLeon Romanovsky 
1171f2a0e45fSLeon Romanovsky 		if (idx < start || !rdma_restrack_get(res))
1172b5fa635aSLeon Romanovsky 			goto next;
1173b5fa635aSLeon Romanovsky 
11747c77c6a9SLeon Romanovsky 		xa_unlock(&rt->xa);
11757c77c6a9SLeon Romanovsky 
1176d12ff624SSteve Wise 		filled = true;
1177b5fa635aSLeon Romanovsky 
1178ae0be8deSMichal Kubecek 		entry_attr = nla_nest_start_noflag(skb, fe->entry);
1179c5dfe0eaSLeon Romanovsky 		if (!entry_attr) {
1180c5dfe0eaSLeon Romanovsky 			ret = -EMSGSIZE;
1181c5dfe0eaSLeon Romanovsky 			rdma_restrack_put(res);
11827c77c6a9SLeon Romanovsky 			goto msg_full;
1183c5dfe0eaSLeon Romanovsky 		}
1184c5dfe0eaSLeon Romanovsky 
1185659067b0SLeon Romanovsky 		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
1186b5fa635aSLeon Romanovsky 		rdma_restrack_put(res);
1187b5fa635aSLeon Romanovsky 
11887c77c6a9SLeon Romanovsky 		if (ret) {
1189c5dfe0eaSLeon Romanovsky 			nla_nest_cancel(skb, entry_attr);
1190b5fa635aSLeon Romanovsky 			if (ret == -EMSGSIZE)
11917c77c6a9SLeon Romanovsky 				goto msg_full;
1192c5dfe0eaSLeon Romanovsky 			if (ret == -EAGAIN)
11937c77c6a9SLeon Romanovsky 				goto again;
1194b5fa635aSLeon Romanovsky 			goto res_err;
11957c77c6a9SLeon Romanovsky 		}
1196c5dfe0eaSLeon Romanovsky 		nla_nest_end(skb, entry_attr);
11977c77c6a9SLeon Romanovsky again:		xa_lock(&rt->xa);
1198b5fa635aSLeon Romanovsky next:		idx++;
1199b5fa635aSLeon Romanovsky 	}
12007c77c6a9SLeon Romanovsky 	xa_unlock(&rt->xa);
1201b5fa635aSLeon Romanovsky 
12027c77c6a9SLeon Romanovsky msg_full:
1203b5fa635aSLeon Romanovsky 	nla_nest_end(skb, table_attr);
1204b5fa635aSLeon Romanovsky 	nlmsg_end(skb, nlh);
1205b5fa635aSLeon Romanovsky 	cb->args[0] = idx;
1206b5fa635aSLeon Romanovsky 
1207b5fa635aSLeon Romanovsky 	/*
1208d12ff624SSteve Wise 	 * No entry was filled on this pass, so cancel the message and
1209b5fa635aSLeon Romanovsky 	 * return 0 to mark the end of the dumpit.
1210b5fa635aSLeon Romanovsky 	 */
1211d12ff624SSteve Wise 	if (!filled)
1212b5fa635aSLeon Romanovsky 		goto err;
1213b5fa635aSLeon Romanovsky 
121401b67117SParav Pandit 	ib_device_put(device);
1215b5fa635aSLeon Romanovsky 	return skb->len;
1216b5fa635aSLeon Romanovsky 
1217b5fa635aSLeon Romanovsky res_err:
1218b5fa635aSLeon Romanovsky 	nla_nest_cancel(skb, table_attr);
1219b5fa635aSLeon Romanovsky 
1220b5fa635aSLeon Romanovsky err:
1221b5fa635aSLeon Romanovsky 	nlmsg_cancel(skb, nlh);
1222b5fa635aSLeon Romanovsky 
1223b5fa635aSLeon Romanovsky err_index:
122401b67117SParav Pandit 	ib_device_put(device);
1225b5fa635aSLeon Romanovsky 	return ret;
1226b5fa635aSLeon Romanovsky }
1227b5fa635aSLeon Romanovsky 
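/*
 * res_get_common_dumpit() pages through the restrack xarray with cb->args[0]:
 * entries with idx < start are skipped, and the index where filling stopped
 * is stored back so the next invocation resumes there.  An illustrative
 * multi-call sequence:
 *
 *	call 1: start = 0, fills idx 0..N-1, message fills up, args[0] = N
 *	call 2: start = N, fills idx N.., and so on
 *	last  : nothing newly filled, message cancelled, 0 returned, dump ends
 */
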
1228f732e713SLeon Romanovsky #define RES_GET_FUNCS(name, type)                                              \
1229f732e713SLeon Romanovsky 	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
1230f732e713SLeon Romanovsky 						 struct netlink_callback *cb)  \
1231f732e713SLeon Romanovsky 	{                                                                      \
1232f732e713SLeon Romanovsky 		return res_get_common_dumpit(skb, cb, type);                   \
1233c5dfe0eaSLeon Romanovsky 	}                                                                      \
1234c5dfe0eaSLeon Romanovsky 	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
1235c5dfe0eaSLeon Romanovsky 					       struct nlmsghdr *nlh,           \
1236c5dfe0eaSLeon Romanovsky 					       struct netlink_ext_ack *extack) \
1237c5dfe0eaSLeon Romanovsky 	{                                                                      \
1238c5dfe0eaSLeon Romanovsky 		return res_get_common_doit(skb, nlh, extack, type);            \
1239d12ff624SSteve Wise 	}
1240d12ff624SSteve Wise 
1241f732e713SLeon Romanovsky RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
1242f732e713SLeon Romanovsky RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
1243f732e713SLeon Romanovsky RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
1244f732e713SLeon Romanovsky RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
1245f732e713SLeon Romanovsky RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
124629cf1351SSteve Wise 
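/*
 * Each RES_GET_FUNCS() invocation above expands to a thin doit/dumpit pair.
 * For example, RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) expands to:
 *
 *	static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
 *					   struct netlink_callback *cb)
 *	{
 *		return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
 *	}
 *	static int nldev_res_get_qp_doit(struct sk_buff *skb,
 *					 struct nlmsghdr *nlh,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP);
 *	}
 */
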
12473856ec4bSSteve Wise static LIST_HEAD(link_ops);
12483856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem);
12493856ec4bSSteve Wise 
12503856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
12513856ec4bSSteve Wise {
12523856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12533856ec4bSSteve Wise 
12543856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
12553856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
12563856ec4bSSteve Wise 			goto out;
12573856ec4bSSteve Wise 	}
12583856ec4bSSteve Wise 	ops = NULL;
12593856ec4bSSteve Wise out:
12603856ec4bSSteve Wise 	return ops;
12613856ec4bSSteve Wise }
12623856ec4bSSteve Wise 
12633856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
12643856ec4bSSteve Wise {
12653856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1266afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
12673856ec4bSSteve Wise 		goto out;
12683856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
12693856ec4bSSteve Wise out:
12703856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
12713856ec4bSSteve Wise }
12723856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
12733856ec4bSSteve Wise 
12743856ec4bSSteve Wise void rdma_link_unregister(struct rdma_link_ops *ops)
12753856ec4bSSteve Wise {
12763856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
12773856ec4bSSteve Wise 	list_del(&ops->list);
12783856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
12793856ec4bSSteve Wise }
12803856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_unregister);
12813856ec4bSSteve Wise 
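/*
 * A link provider hooks into nldev_newlink() below by registering a
 * struct rdma_link_ops.  A minimal sketch with hypothetical names (the
 * "foo" driver and its functions are not part of this file):
 *
 *	static int foo_newlink(const char *ibdev_name, struct net_device *ndev);
 *
 *	static struct rdma_link_ops foo_link_ops = {
 *		.type = "foo",
 *		.newlink = foo_newlink,
 *	};
 *
 *	module init:  rdma_link_register(&foo_link_ops);
 *	module exit:  rdma_link_unregister(&foo_link_ops);
 *
 * Pairing this with a "rdma-link-foo" module alias (e.g. via the
 * MODULE_ALIAS_RDMA_LINK() helper, where available) lets the
 * request_module() call in nldev_newlink() load the provider on demand.
 */
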
12823856ec4bSSteve Wise static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
12833856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
12843856ec4bSSteve Wise {
12853856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
12863856ec4bSSteve Wise 	char ibdev_name[IB_DEVICE_NAME_MAX];
12873856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12883856ec4bSSteve Wise 	char ndev_name[IFNAMSIZ];
12893856ec4bSSteve Wise 	struct net_device *ndev;
12903856ec4bSSteve Wise 	char type[IFNAMSIZ];
12913856ec4bSSteve Wise 	int err;
12923856ec4bSSteve Wise 
12938cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
12943856ec4bSSteve Wise 				     nldev_policy, extack);
12953856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
12963856ec4bSSteve Wise 	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
12973856ec4bSSteve Wise 		return -EINVAL;
12983856ec4bSSteve Wise 
12993856ec4bSSteve Wise 	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
13003856ec4bSSteve Wise 		    sizeof(ibdev_name));
13013856ec4bSSteve Wise 	if (strchr(ibdev_name, '%'))
13023856ec4bSSteve Wise 		return -EINVAL;
13033856ec4bSSteve Wise 
13043856ec4bSSteve Wise 	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
13053856ec4bSSteve Wise 	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
13063856ec4bSSteve Wise 		    sizeof(ndev_name));
13073856ec4bSSteve Wise 
13083856ec4bSSteve Wise 	ndev = dev_get_by_name(&init_net, ndev_name);
13093856ec4bSSteve Wise 	if (!ndev)
13103856ec4bSSteve Wise 		return -ENODEV;
13113856ec4bSSteve Wise 
13123856ec4bSSteve Wise 	down_read(&link_ops_rwsem);
13133856ec4bSSteve Wise 	ops = link_ops_get(type);
13143856ec4bSSteve Wise #ifdef CONFIG_MODULES
13153856ec4bSSteve Wise 	if (!ops) {
13163856ec4bSSteve Wise 		up_read(&link_ops_rwsem);
13173856ec4bSSteve Wise 		request_module("rdma-link-%s", type);
13183856ec4bSSteve Wise 		down_read(&link_ops_rwsem);
13193856ec4bSSteve Wise 		ops = link_ops_get(type);
13203856ec4bSSteve Wise 	}
13213856ec4bSSteve Wise #endif
13223856ec4bSSteve Wise 	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
13233856ec4bSSteve Wise 	up_read(&link_ops_rwsem);
13243856ec4bSSteve Wise 	dev_put(ndev);
13253856ec4bSSteve Wise 
13263856ec4bSSteve Wise 	return err;
13273856ec4bSSteve Wise }
13283856ec4bSSteve Wise 
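/*
 * This handler backs the "link add" operation of the iproute2 rdma tool.
 * As an assumed usage example (the command syntax is not defined in this
 * file), something like:
 *
 *	rdma link add rxe0 type rxe netdev eth0
 *
 * sends RDMA_NLDEV_CMD_NEWLINK with DEV_NAME "rxe0", LINK_TYPE "rxe" and
 * NDEV_NAME "eth0", which is resolved to the matching rdma_link_ops here.
 */
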
13293856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
13303856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
13313856ec4bSSteve Wise {
13323856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
13333856ec4bSSteve Wise 	struct ib_device *device;
13343856ec4bSSteve Wise 	u32 index;
13353856ec4bSSteve Wise 	int err;
13363856ec4bSSteve Wise 
13378cb08174SJohannes Berg 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
13383856ec4bSSteve Wise 				     nldev_policy, extack);
13393856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
13403856ec4bSSteve Wise 		return -EINVAL;
13413856ec4bSSteve Wise 
13423856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
134337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
13443856ec4bSSteve Wise 	if (!device)
13453856ec4bSSteve Wise 		return -EINVAL;
13463856ec4bSSteve Wise 
13473856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
13483856ec4bSSteve Wise 		ib_device_put(device);
13493856ec4bSSteve Wise 		return -EINVAL;
13503856ec4bSSteve Wise 	}
13513856ec4bSSteve Wise 
13523856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
13533856ec4bSSteve Wise 	return 0;
13543856ec4bSSteve Wise }
13553856ec4bSSteve Wise 
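/*
 * The DELLINK counterpart: only devices advertising
 * IB_DEVICE_ALLOW_USER_UNREG (typically devices created through NEWLINK
 * above) may be unregistered this way, e.g. via "rdma link delete rxe0"
 * (assumed iproute2 syntax).
 */
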
13560e2d00ebSJason Gunthorpe static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
13570e2d00ebSJason Gunthorpe 			     struct netlink_ext_ack *extack)
13580e2d00ebSJason Gunthorpe {
13590e2d00ebSJason Gunthorpe 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
13600e2d00ebSJason Gunthorpe 	char client_name[IB_DEVICE_NAME_MAX];
13610e2d00ebSJason Gunthorpe 	struct ib_client_nl_info data = {};
13620e2d00ebSJason Gunthorpe 	struct ib_device *ibdev = NULL;
13630e2d00ebSJason Gunthorpe 	struct sk_buff *msg;
13640e2d00ebSJason Gunthorpe 	u32 index;
13650e2d00ebSJason Gunthorpe 	int err;
13660e2d00ebSJason Gunthorpe 
13670e2d00ebSJason Gunthorpe 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
13680e2d00ebSJason Gunthorpe 			  extack);
13690e2d00ebSJason Gunthorpe 	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
13700e2d00ebSJason Gunthorpe 		return -EINVAL;
13710e2d00ebSJason Gunthorpe 
13720e2d00ebSJason Gunthorpe 	if (nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
13730e2d00ebSJason Gunthorpe 			sizeof(client_name)) >= sizeof(client_name))
13740e2d00ebSJason Gunthorpe 		return -EINVAL;
13750e2d00ebSJason Gunthorpe 
13760e2d00ebSJason Gunthorpe 	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
13770e2d00ebSJason Gunthorpe 		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
13780e2d00ebSJason Gunthorpe 		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
13790e2d00ebSJason Gunthorpe 		if (!ibdev)
13800e2d00ebSJason Gunthorpe 			return -EINVAL;
13810e2d00ebSJason Gunthorpe 
13820e2d00ebSJason Gunthorpe 		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
13830e2d00ebSJason Gunthorpe 			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
13840e2d00ebSJason Gunthorpe 			if (!rdma_is_port_valid(ibdev, data.port)) {
13850e2d00ebSJason Gunthorpe 				err = -EINVAL;
13860e2d00ebSJason Gunthorpe 				goto out_put;
13870e2d00ebSJason Gunthorpe 			}
13880e2d00ebSJason Gunthorpe 		} else {
13890e2d00ebSJason Gunthorpe 			data.port = -1;
13900e2d00ebSJason Gunthorpe 		}
13910e2d00ebSJason Gunthorpe 	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
13920e2d00ebSJason Gunthorpe 		return -EINVAL;
13930e2d00ebSJason Gunthorpe 	}
13940e2d00ebSJason Gunthorpe 
13950e2d00ebSJason Gunthorpe 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
13960e2d00ebSJason Gunthorpe 	if (!msg) {
13970e2d00ebSJason Gunthorpe 		err = -ENOMEM;
13980e2d00ebSJason Gunthorpe 		goto out_put;
13990e2d00ebSJason Gunthorpe 	}
14000e2d00ebSJason Gunthorpe 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
14010e2d00ebSJason Gunthorpe 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
14020e2d00ebSJason Gunthorpe 					 RDMA_NLDEV_CMD_GET_CHARDEV),
14030e2d00ebSJason Gunthorpe 			0, 0);
14040e2d00ebSJason Gunthorpe 
14050e2d00ebSJason Gunthorpe 	data.nl_msg = msg;
14060e2d00ebSJason Gunthorpe 	err = ib_get_client_nl_info(ibdev, client_name, &data);
14070e2d00ebSJason Gunthorpe 	if (err)
14080e2d00ebSJason Gunthorpe 		goto out_nlmsg;
14090e2d00ebSJason Gunthorpe 
14100e2d00ebSJason Gunthorpe 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
14110e2d00ebSJason Gunthorpe 				huge_encode_dev(data.cdev->devt),
14120e2d00ebSJason Gunthorpe 				RDMA_NLDEV_ATTR_PAD);
14130e2d00ebSJason Gunthorpe 	if (err)
14140e2d00ebSJason Gunthorpe 		goto out_data;
14150e2d00ebSJason Gunthorpe 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
14160e2d00ebSJason Gunthorpe 				RDMA_NLDEV_ATTR_PAD);
14170e2d00ebSJason Gunthorpe 	if (err)
14180e2d00ebSJason Gunthorpe 		goto out_data;
14190e2d00ebSJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
14200e2d00ebSJason Gunthorpe 			   dev_name(data.cdev))) {
14210e2d00ebSJason Gunthorpe 		err = -EMSGSIZE;
14220e2d00ebSJason Gunthorpe 		goto out_data;
14230e2d00ebSJason Gunthorpe 	}
14240e2d00ebSJason Gunthorpe 
14250e2d00ebSJason Gunthorpe 	nlmsg_end(msg, nlh);
14260e2d00ebSJason Gunthorpe 	put_device(data.cdev);
14270e2d00ebSJason Gunthorpe 	if (ibdev)
14280e2d00ebSJason Gunthorpe 		ib_device_put(ibdev);
14290e2d00ebSJason Gunthorpe 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
14300e2d00ebSJason Gunthorpe 
14310e2d00ebSJason Gunthorpe out_data:
14320e2d00ebSJason Gunthorpe 	put_device(data.cdev);
14330e2d00ebSJason Gunthorpe out_nlmsg:
14340e2d00ebSJason Gunthorpe 	nlmsg_free(msg);
14350e2d00ebSJason Gunthorpe out_put:
14360e2d00ebSJason Gunthorpe 	if (ibdev)
14370e2d00ebSJason Gunthorpe 		ib_device_put(ibdev);
14380e2d00ebSJason Gunthorpe 	return err;
14390e2d00ebSJason Gunthorpe }
14400e2d00ebSJason Gunthorpe 
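/*
 * The GET_CHARDEV reply built above carries three attributes for the
 * requested client: RDMA_NLDEV_ATTR_CHARDEV (the encoded dev_t),
 * RDMA_NLDEV_ATTR_CHARDEV_ABI and RDMA_NLDEV_ATTR_CHARDEV_NAME, which
 * userspace can use to locate the matching character device node and to
 * check the ABI it exposes.
 */
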
14414d7ba8ceSParav Pandit static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
14424d7ba8ceSParav Pandit 			      struct netlink_ext_ack *extack)
1443cb7e0e13SParav Pandit {
1444cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
14454d7ba8ceSParav Pandit 	struct sk_buff *msg;
1446cb7e0e13SParav Pandit 	int err;
1447cb7e0e13SParav Pandit 
14484d7ba8ceSParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
14494d7ba8ceSParav Pandit 			  nldev_policy, extack);
1450cb7e0e13SParav Pandit 	if (err)
1451cb7e0e13SParav Pandit 		return err;
1452cb7e0e13SParav Pandit 
14534d7ba8ceSParav Pandit 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
14544d7ba8ceSParav Pandit 	if (!msg)
14554d7ba8ceSParav Pandit 		return -ENOMEM;
14564d7ba8ceSParav Pandit 
14574d7ba8ceSParav Pandit 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1458cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1459cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1460cb7e0e13SParav Pandit 			0, 0);
1461cb7e0e13SParav Pandit 
14624d7ba8ceSParav Pandit 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1463cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1464cb7e0e13SParav Pandit 	if (err) {
14654d7ba8ceSParav Pandit 		nlmsg_free(msg);
1466cb7e0e13SParav Pandit 		return err;
1467cb7e0e13SParav Pandit 	}
14684d7ba8ceSParav Pandit 	nlmsg_end(msg, nlh);
14694d7ba8ceSParav Pandit 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1470cb7e0e13SParav Pandit }
1471cb7e0e13SParav Pandit 
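/*
 * SYS_GET reports a single u8 attribute, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
 * reflecting ib_devices_shared_netns (1 = rdma devices shared across net
 * namespaces, 0 = exclusive per-namespace mode); e.g. "rdma system show"
 * (assumed iproute2 syntax) displays this value.
 */
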
14722b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
14732b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
14742b34c558SParav Pandit {
14752b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
14762b34c558SParav Pandit 	u8 enable;
14772b34c558SParav Pandit 	int err;
14782b34c558SParav Pandit 
14792b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
14802b34c558SParav Pandit 			  nldev_policy, extack);
14812b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
14822b34c558SParav Pandit 		return -EINVAL;
14832b34c558SParav Pandit 
14842b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
14852b34c558SParav Pandit 	/* Only 0 and 1 are supported */
14862b34c558SParav Pandit 	if (enable > 1)
14872b34c558SParav Pandit 		return -EINVAL;
14882b34c558SParav Pandit 
14892b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
14902b34c558SParav Pandit 	return err;
14912b34c558SParav Pandit }
14922b34c558SParav Pandit 
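/*
 * The SYS_SET counterpart: e.g. "rdma system set netns exclusive" (assumed
 * iproute2 syntax) would carry NETNS_MODE = 0, and rdma_compatdev_set()
 * then applies the mode change, or refuses it if the switch is not
 * currently possible.
 */
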
1493d0e312feSLeon Romanovsky static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
1494b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_CMD_GET] = {
1495e5c9469eSLeon Romanovsky 		.doit = nldev_get_doit,
1496b4c598a6SLeon Romanovsky 		.dump = nldev_get_dumpit,
1497b4c598a6SLeon Romanovsky 	},
14980e2d00ebSJason Gunthorpe 	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
14990e2d00ebSJason Gunthorpe 		.doit = nldev_get_chardev,
15000e2d00ebSJason Gunthorpe 	},
150105d940d3SLeon Romanovsky 	[RDMA_NLDEV_CMD_SET] = {
150205d940d3SLeon Romanovsky 		.doit = nldev_set_doit,
150305d940d3SLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
150405d940d3SLeon Romanovsky 	},
15053856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_NEWLINK] = {
15063856ec4bSSteve Wise 		.doit = nldev_newlink,
15073856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
15083856ec4bSSteve Wise 	},
15093856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_DELLINK] = {
15103856ec4bSSteve Wise 		.doit = nldev_dellink,
15113856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
15123856ec4bSSteve Wise 	},
15137d02f605SLeon Romanovsky 	[RDMA_NLDEV_CMD_PORT_GET] = {
1514c3f66f7bSLeon Romanovsky 		.doit = nldev_port_get_doit,
15157d02f605SLeon Romanovsky 		.dump = nldev_port_get_dumpit,
15167d02f605SLeon Romanovsky 	},
1517bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_GET] = {
1518bf3c5a93SLeon Romanovsky 		.doit = nldev_res_get_doit,
1519bf3c5a93SLeon Romanovsky 		.dump = nldev_res_get_dumpit,
1520bf3c5a93SLeon Romanovsky 	},
1521b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
1522c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_qp_doit,
1523b5fa635aSLeon Romanovsky 		.dump = nldev_res_get_qp_dumpit,
1524b5fa635aSLeon Romanovsky 	},
152500313983SSteve Wise 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
1526c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cm_id_doit,
152700313983SSteve Wise 		.dump = nldev_res_get_cm_id_dumpit,
152800313983SSteve Wise 	},
1529a34fc089SSteve Wise 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
1530c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cq_doit,
1531a34fc089SSteve Wise 		.dump = nldev_res_get_cq_dumpit,
1532a34fc089SSteve Wise 	},
1533fccec5b8SSteve Wise 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
1534c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_mr_doit,
1535fccec5b8SSteve Wise 		.dump = nldev_res_get_mr_dumpit,
1536fccec5b8SSteve Wise 	},
153729cf1351SSteve Wise 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
1538c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_pd_doit,
153929cf1351SSteve Wise 		.dump = nldev_res_get_pd_dumpit,
154029cf1351SSteve Wise 	},
1541cb7e0e13SParav Pandit 	[RDMA_NLDEV_CMD_SYS_GET] = {
15424d7ba8ceSParav Pandit 		.doit = nldev_sys_get_doit,
1543cb7e0e13SParav Pandit 	},
15442b34c558SParav Pandit 	[RDMA_NLDEV_CMD_SYS_SET] = {
15452b34c558SParav Pandit 		.doit = nldev_set_sys_set_doit,
15462b34c558SParav Pandit 		.flags = RDMA_NL_ADMIN_PERM,
15472b34c558SParav Pandit 	},
1548b4c598a6SLeon Romanovsky };
1549b4c598a6SLeon Romanovsky 
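/*
 * nldev_init() below registers this table with the RDMA netlink core, so
 * each RDMA_NLDEV_CMD_* message is dispatched to its .doit and/or .dump
 * handler; entries flagged RDMA_NL_ADMIN_PERM additionally require the
 * sender to hold CAP_NET_ADMIN.
 */
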
15506c80b41aSLeon Romanovsky void __init nldev_init(void)
15516c80b41aSLeon Romanovsky {
1552b4c598a6SLeon Romanovsky 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
15536c80b41aSLeon Romanovsky }
15546c80b41aSLeon Romanovsky 
15556c80b41aSLeon Romanovsky void __exit nldev_exit(void)
15566c80b41aSLeon Romanovsky {
15576c80b41aSLeon Romanovsky 	rdma_nl_unregister(RDMA_NL_NLDEV);
15586c80b41aSLeon Romanovsky }
1559e3bf14bdSJason Gunthorpe 
1560e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
1561