xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision 2b34c558)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
363856ec4bSSteve Wise #include <linux/mutex.h>
37b4c598a6SLeon Romanovsky #include <net/netlink.h>
3800313983SSteve Wise #include <rdma/rdma_cm.h>
396c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
406c80b41aSLeon Romanovsky 
416c80b41aSLeon Romanovsky #include "core_priv.h"
4200313983SSteve Wise #include "cma_priv.h"
4341eda65cSLeon Romanovsky #include "restrack.h"
446c80b41aSLeon Romanovsky 
/*
 * Netlink attribute policy for the RDMA_NLDEV_* family of commands.
 * Used by nla_parse() to validate attributes coming from userspace;
 * string attributes carry an explicit maximum length so they cannot
 * overflow the fixed-size kernel buffers they are copied into.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
					     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
						    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	/* Address attrs have no .type: only a length bound is enforced. */
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
						    .len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]               = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MRN]               = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]            = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CTXN]              = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
};
121b4c598a6SLeon Romanovsky 
12273937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
12373937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
12473937e8aSSteve Wise {
12573937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
12673937e8aSSteve Wise 		return -EMSGSIZE;
12773937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
12873937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
12973937e8aSSteve Wise 		return -EMSGSIZE;
13073937e8aSSteve Wise 
13173937e8aSSteve Wise 	return 0;
13273937e8aSSteve Wise }
13373937e8aSSteve Wise 
13473937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
13573937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
13673937e8aSSteve Wise 				   u32 value)
13773937e8aSSteve Wise {
13873937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
13973937e8aSSteve Wise 		return -EMSGSIZE;
14073937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
14173937e8aSSteve Wise 		return -EMSGSIZE;
14273937e8aSSteve Wise 
14373937e8aSSteve Wise 	return 0;
14473937e8aSSteve Wise }
14573937e8aSSteve Wise 
14673937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
14773937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
14873937e8aSSteve Wise 				   u64 value)
14973937e8aSSteve Wise {
15073937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
15173937e8aSSteve Wise 		return -EMSGSIZE;
15273937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
15373937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
15473937e8aSSteve Wise 		return -EMSGSIZE;
15573937e8aSSteve Wise 
15673937e8aSSteve Wise 	return 0;
15773937e8aSSteve Wise }
15873937e8aSSteve Wise 
15973937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
16073937e8aSSteve Wise {
16173937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
16273937e8aSSteve Wise 				       value);
16373937e8aSSteve Wise }
16473937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32);
16573937e8aSSteve Wise 
16673937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
16773937e8aSSteve Wise 			       u32 value)
16873937e8aSSteve Wise {
16973937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
17073937e8aSSteve Wise 				       value);
17173937e8aSSteve Wise }
17273937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
17373937e8aSSteve Wise 
17473937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
17573937e8aSSteve Wise {
17673937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
17773937e8aSSteve Wise 				       value);
17873937e8aSSteve Wise }
17973937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64);
18073937e8aSSteve Wise 
18173937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
18273937e8aSSteve Wise {
18373937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
18473937e8aSSteve Wise 				       value);
18573937e8aSSteve Wise }
18673937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
18773937e8aSSteve Wise 
188c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
189b4c598a6SLeon Romanovsky {
190b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
191b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
192896de009SJason Gunthorpe 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
193896de009SJason Gunthorpe 			   dev_name(&device->dev)))
194b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
195c2409810SLeon Romanovsky 
196c2409810SLeon Romanovsky 	return 0;
197c2409810SLeon Romanovsky }
198c2409810SLeon Romanovsky 
/*
 * Fill device-wide attributes into a netlink message: the device handle,
 * the highest port number, capability flags, firmware version (if any),
 * node/system-image GUIDs and the node type.
 * Returns 0 on success or -EMSGSIZE when the skb runs out of room.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* rdma_end_port() is reported as the port count attribute here. */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	/* cap_flags is put as a u64 attr; guard against field size changes. */
	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	/* GUIDs are stored big-endian; convert to CPU order for userspace. */
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}
232b4c598a6SLeon Romanovsky 
/*
 * Fill per-port attributes into a netlink message: device handle, port
 * index, IB-only link attributes (capability flags, subnet prefix, LIDs,
 * LMC), the port state, and — when the port's netdev lives in @net — the
 * netdev's ifindex and name.
 * Returns 0 on success, -EMSGSIZE on a full skb, or the ib_query_port()
 * error.
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap-flag words must fit in the one u64 attr. */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/* Holds a reference on the netdev; released via dev_put() below.
	 * The netdev is only exposed when it belongs to the caller's netns.
	 * If it doesn't, ret is still 0 here from ib_query_port() above.
	 */
	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}
2907d02f605SLeon Romanovsky 
291bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
292bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
293bf3c5a93SLeon Romanovsky {
294bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
295bf3c5a93SLeon Romanovsky 
296bf3c5a93SLeon Romanovsky 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
297bf3c5a93SLeon Romanovsky 	if (!entry_attr)
298bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
299bf3c5a93SLeon Romanovsky 
300bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
301bf3c5a93SLeon Romanovsky 		goto err;
30225a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
30325a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
304bf3c5a93SLeon Romanovsky 		goto err;
305bf3c5a93SLeon Romanovsky 
306bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
307bf3c5a93SLeon Romanovsky 	return 0;
308bf3c5a93SLeon Romanovsky 
309bf3c5a93SLeon Romanovsky err:
310bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
311bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
312bf3c5a93SLeon Romanovsky }
313bf3c5a93SLeon Romanovsky 
314bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
315bf3c5a93SLeon Romanovsky {
316bf3c5a93SLeon Romanovsky 	static const char * const names[RDMA_RESTRACK_MAX] = {
317bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_PD] = "pd",
318bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_CQ] = "cq",
319bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_QP] = "qp",
32000313983SSteve Wise 		[RDMA_RESTRACK_CM_ID] = "cm_id",
321fccec5b8SSteve Wise 		[RDMA_RESTRACK_MR] = "mr",
322ffd321e4SLeon Romanovsky 		[RDMA_RESTRACK_CTX] = "ctx",
323bf3c5a93SLeon Romanovsky 	};
324bf3c5a93SLeon Romanovsky 
325bf3c5a93SLeon Romanovsky 	struct nlattr *table_attr;
326bf3c5a93SLeon Romanovsky 	int ret, i, curr;
327bf3c5a93SLeon Romanovsky 
328bf3c5a93SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
329bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
330bf3c5a93SLeon Romanovsky 
331bf3c5a93SLeon Romanovsky 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
332bf3c5a93SLeon Romanovsky 	if (!table_attr)
333bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
334bf3c5a93SLeon Romanovsky 
335bf3c5a93SLeon Romanovsky 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
336bf3c5a93SLeon Romanovsky 		if (!names[i])
337bf3c5a93SLeon Romanovsky 			continue;
3380ad699c0SLeon Romanovsky 		curr = rdma_restrack_count(device, i,
3390ad699c0SLeon Romanovsky 					   task_active_pid_ns(current));
340bf3c5a93SLeon Romanovsky 		ret = fill_res_info_entry(msg, names[i], curr);
341bf3c5a93SLeon Romanovsky 		if (ret)
342bf3c5a93SLeon Romanovsky 			goto err;
343bf3c5a93SLeon Romanovsky 	}
344bf3c5a93SLeon Romanovsky 
345bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, table_attr);
346bf3c5a93SLeon Romanovsky 	return 0;
347bf3c5a93SLeon Romanovsky 
348bf3c5a93SLeon Romanovsky err:
349bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, table_attr);
350bf3c5a93SLeon Romanovsky 	return ret;
351bf3c5a93SLeon Romanovsky }
352bf3c5a93SLeon Romanovsky 
35300313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
35400313983SSteve Wise 			     struct rdma_restrack_entry *res)
35500313983SSteve Wise {
35600313983SSteve Wise 	/*
35700313983SSteve Wise 	 * For user resources, user is should read /proc/PID/comm to get the
35800313983SSteve Wise 	 * name of the task file.
35900313983SSteve Wise 	 */
36000313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
36100313983SSteve Wise 		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
36200313983SSteve Wise 		    res->kern_name))
36300313983SSteve Wise 			return -EMSGSIZE;
36400313983SSteve Wise 	} else {
36500313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
36600313983SSteve Wise 		    task_pid_vnr(res->task)))
36700313983SSteve Wise 			return -EMSGSIZE;
36800313983SSteve Wise 	}
36900313983SSteve Wise 	return 0;
37000313983SSteve Wise }
37100313983SSteve Wise 
37202da3750SLeon Romanovsky static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
37302da3750SLeon Romanovsky 			   struct rdma_restrack_entry *res)
37402da3750SLeon Romanovsky {
37502da3750SLeon Romanovsky 	if (!dev->ops.fill_res_entry)
37602da3750SLeon Romanovsky 		return false;
37702da3750SLeon Romanovsky 	return dev->ops.fill_res_entry(msg, res);
37802da3750SLeon Romanovsky }
37902da3750SLeon Romanovsky 
/*
 * Dump one QP restrack entry into @msg.  When @port is non-zero, only
 * QPs currently bound to that port are reported; others return -EAGAIN
 * so the caller's iterator skips them.  @has_cap_net_admin is unused
 * here — no QP attribute in this filler is treated as privileged.
 * Returns 0, -EAGAIN (skip), -EMSGSIZE, or an ib_query_qp() error.
 */
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	/* Remote QPN / RQ PSN only exist for connected (RC/UC) transports */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	/* Path migration state applies to RC/UC/XRC QPs only */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	/* PD number is only exposed for user-owned QPs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	/* Driver-private attributes, if the driver provides a filler */
	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}
44000313983SSteve Wise 
/*
 * Dump one CM ID restrack entry: port (when bound), QPN/QP type (when a
 * QP is attached), port space, CM state, source/destination addresses,
 * CM ID number and owner info, plus driver-private attributes.
 * NOTE(review): a port mismatch returns 0 here, while fill_res_qp_entry()
 * returns -EAGAIN for the same condition — confirm the iterator treats
 * both as "skip".
 * Returns 0 on success/skip or -EMSGSIZE.
 */
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return 0;

	/* port_num is 0 until the CM ID is bound to a device port */
	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	/* Addresses are emitted only once an address family is set */
	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	/* Driver-private attributes, if the driver provides a filler */
	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err: return -EMSGSIZE;
}
493b5fa635aSLeon Romanovsky 
/*
 * Dump one CQ restrack entry: CQE count, reference count, poll context
 * (kernel CQs only), CQ number, owning ucontext (user CQs only), owner
 * info and driver-private attributes.
 * @port and @has_cap_net_admin are unused — CQs are not port-bound and
 * no CQ attribute here is privileged.  Returns 0 or -EMSGSIZE.
 */
static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		goto err;
	/* Only user CQs have a uobject/ucontext to report */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	/* Driver-private attributes, if the driver provides a filler */
	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}
528a34fc089SSteve Wise 
/*
 * Dump one MR restrack entry: rkey/lkey (CAP_NET_ADMIN callers only, as
 * the keys are security sensitive), MR length, MR number, owning PD
 * (user MRs only), owner info and driver-private attributes.
 * @port is unused — MRs are not port-bound.  Returns 0 or -EMSGSIZE.
 */
static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		goto err;

	/* PD number is only exposed for user-owned MRs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	/* Driver-private attributes, if the driver provides a filler */
	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}
563fccec5b8SSteve Wise 
564659067b0SLeon Romanovsky static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
56529cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
56629cf1351SSteve Wise {
56729cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
56802da3750SLeon Romanovsky 	struct ib_device *dev = pd->device;
56929cf1351SSteve Wise 
570659067b0SLeon Romanovsky 	if (has_cap_net_admin) {
57129cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
57229cf1351SSteve Wise 				pd->local_dma_lkey))
57329cf1351SSteve Wise 			goto err;
57429cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
57529cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
57629cf1351SSteve Wise 				pd->unsafe_global_rkey))
57729cf1351SSteve Wise 			goto err;
57829cf1351SSteve Wise 	}
57929cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
58025a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
58129cf1351SSteve Wise 		goto err;
58229cf1351SSteve Wise 
583517b773eSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
584517b773eSLeon Romanovsky 		goto err;
585517b773eSLeon Romanovsky 
586c3d02788SLeon Romanovsky 	if (!rdma_is_kernel_res(res) &&
587c3d02788SLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
588c3d02788SLeon Romanovsky 			pd->uobject->context->res.id))
589c3d02788SLeon Romanovsky 		goto err;
590c3d02788SLeon Romanovsky 
59129cf1351SSteve Wise 	if (fill_res_name_pid(msg, res))
59229cf1351SSteve Wise 		goto err;
59329cf1351SSteve Wise 
59402da3750SLeon Romanovsky 	if (fill_res_entry(dev, msg, res))
595da5c8507SSteve Wise 		goto err;
596da5c8507SSteve Wise 
59729cf1351SSteve Wise 	return 0;
59829cf1351SSteve Wise 
599c5dfe0eaSLeon Romanovsky err:	return -EMSGSIZE;
60029cf1351SSteve Wise }
60129cf1351SSteve Wise 
602e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
603e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
604e5c9469eSLeon Romanovsky {
605e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
606e5c9469eSLeon Romanovsky 	struct ib_device *device;
607e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
608e5c9469eSLeon Romanovsky 	u32 index;
609e5c9469eSLeon Romanovsky 	int err;
610e5c9469eSLeon Romanovsky 
611e5c9469eSLeon Romanovsky 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
612e5c9469eSLeon Romanovsky 			  nldev_policy, extack);
613e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
614e5c9469eSLeon Romanovsky 		return -EINVAL;
615e5c9469eSLeon Romanovsky 
616e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
617e5c9469eSLeon Romanovsky 
61837eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
619e5c9469eSLeon Romanovsky 	if (!device)
620e5c9469eSLeon Romanovsky 		return -EINVAL;
621e5c9469eSLeon Romanovsky 
622e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
623f8978bd9SLeon Romanovsky 	if (!msg) {
624f8978bd9SLeon Romanovsky 		err = -ENOMEM;
625f8978bd9SLeon Romanovsky 		goto err;
626f8978bd9SLeon Romanovsky 	}
627e5c9469eSLeon Romanovsky 
628e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
629e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
630e5c9469eSLeon Romanovsky 			0, 0);
631e5c9469eSLeon Romanovsky 
632e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
633f8978bd9SLeon Romanovsky 	if (err)
634f8978bd9SLeon Romanovsky 		goto err_free;
635e5c9469eSLeon Romanovsky 
636e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
637e5c9469eSLeon Romanovsky 
63801b67117SParav Pandit 	ib_device_put(device);
639e5c9469eSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
640f8978bd9SLeon Romanovsky 
641f8978bd9SLeon Romanovsky err_free:
642f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
643f8978bd9SLeon Romanovsky err:
64401b67117SParav Pandit 	ib_device_put(device);
645f8978bd9SLeon Romanovsky 	return err;
646e5c9469eSLeon Romanovsky }
647e5c9469eSLeon Romanovsky 
64805d940d3SLeon Romanovsky static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
64905d940d3SLeon Romanovsky 			  struct netlink_ext_ack *extack)
65005d940d3SLeon Romanovsky {
65105d940d3SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
65205d940d3SLeon Romanovsky 	struct ib_device *device;
65305d940d3SLeon Romanovsky 	u32 index;
65405d940d3SLeon Romanovsky 	int err;
65505d940d3SLeon Romanovsky 
65605d940d3SLeon Romanovsky 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
65705d940d3SLeon Romanovsky 			  extack);
65805d940d3SLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
65905d940d3SLeon Romanovsky 		return -EINVAL;
66005d940d3SLeon Romanovsky 
66105d940d3SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
66237eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
66305d940d3SLeon Romanovsky 	if (!device)
66405d940d3SLeon Romanovsky 		return -EINVAL;
66505d940d3SLeon Romanovsky 
66605d940d3SLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
66705d940d3SLeon Romanovsky 		char name[IB_DEVICE_NAME_MAX] = {};
66805d940d3SLeon Romanovsky 
66905d940d3SLeon Romanovsky 		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
67005d940d3SLeon Romanovsky 			    IB_DEVICE_NAME_MAX);
67105d940d3SLeon Romanovsky 		err = ib_device_rename(device, name);
67205d940d3SLeon Romanovsky 	}
67305d940d3SLeon Romanovsky 
67401b67117SParav Pandit 	ib_device_put(device);
67505d940d3SLeon Romanovsky 	return err;
67605d940d3SLeon Romanovsky }
67705d940d3SLeon Romanovsky 
678b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
679b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
680b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
681b4c598a6SLeon Romanovsky 			     unsigned int idx)
682b4c598a6SLeon Romanovsky {
683b4c598a6SLeon Romanovsky 	int start = cb->args[0];
684b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
685b4c598a6SLeon Romanovsky 
686b4c598a6SLeon Romanovsky 	if (idx < start)
687b4c598a6SLeon Romanovsky 		return 0;
688b4c598a6SLeon Romanovsky 
689b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
690b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
691b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
692b4c598a6SLeon Romanovsky 
693b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
694b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
695b4c598a6SLeon Romanovsky 		goto out;
696b4c598a6SLeon Romanovsky 	}
697b4c598a6SLeon Romanovsky 
698b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
699b4c598a6SLeon Romanovsky 
700b4c598a6SLeon Romanovsky 	idx++;
701b4c598a6SLeon Romanovsky 
702b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
703b4c598a6SLeon Romanovsky 	return skb->len;
704b4c598a6SLeon Romanovsky }
705b4c598a6SLeon Romanovsky 
/* RDMA_NLDEV_CMD_GET dumpit: emit one message per registered device. */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
714b4c598a6SLeon Romanovsky 
715c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
716c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
717c3f66f7bSLeon Romanovsky {
718c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
719c3f66f7bSLeon Romanovsky 	struct ib_device *device;
720c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
721c3f66f7bSLeon Romanovsky 	u32 index;
722c3f66f7bSLeon Romanovsky 	u32 port;
723c3f66f7bSLeon Romanovsky 	int err;
724c3f66f7bSLeon Romanovsky 
725c3f66f7bSLeon Romanovsky 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
726c3f66f7bSLeon Romanovsky 			  nldev_policy, extack);
727287683d0SLeon Romanovsky 	if (err ||
728287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
729287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
730c3f66f7bSLeon Romanovsky 		return -EINVAL;
731c3f66f7bSLeon Romanovsky 
732c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
73337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
734c3f66f7bSLeon Romanovsky 	if (!device)
735c3f66f7bSLeon Romanovsky 		return -EINVAL;
736c3f66f7bSLeon Romanovsky 
737c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
738f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
739f8978bd9SLeon Romanovsky 		err = -EINVAL;
740f8978bd9SLeon Romanovsky 		goto err;
741f8978bd9SLeon Romanovsky 	}
742c3f66f7bSLeon Romanovsky 
743c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
744f8978bd9SLeon Romanovsky 	if (!msg) {
745f8978bd9SLeon Romanovsky 		err = -ENOMEM;
746f8978bd9SLeon Romanovsky 		goto err;
747f8978bd9SLeon Romanovsky 	}
748c3f66f7bSLeon Romanovsky 
749c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
750c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
751c3f66f7bSLeon Romanovsky 			0, 0);
752c3f66f7bSLeon Romanovsky 
7535b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
754f8978bd9SLeon Romanovsky 	if (err)
755f8978bd9SLeon Romanovsky 		goto err_free;
756c3f66f7bSLeon Romanovsky 
757c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
75801b67117SParav Pandit 	ib_device_put(device);
759c3f66f7bSLeon Romanovsky 
760c3f66f7bSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
761f8978bd9SLeon Romanovsky 
762f8978bd9SLeon Romanovsky err_free:
763f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
764f8978bd9SLeon Romanovsky err:
76501b67117SParav Pandit 	ib_device_put(device);
766f8978bd9SLeon Romanovsky 	return err;
767c3f66f7bSLeon Romanovsky }
768c3f66f7bSLeon Romanovsky 
/*
 * RDMA_NLDEV_CMD_PORT_GET dumpit: emit one message per port of the
 * device selected by the mandatory DEV_INDEX attribute.  Resumes from
 * the port index saved in cb->args[0] on a previous round.
 */
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Takes a device reference; released at "out" below. */
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function returns all information from specific
		 * index. This specific index is taken from the netlink
		 * messages request sent by user and it is available
		 * in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field and it causes
		 * to return everything.
		 *
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		/*
		 * NOTE(review): nlmsg_put() can return NULL when the skb
		 * fills up mid-dump; fill_port_info()/nlmsg_cancel() would
		 * then see a NULL header — confirm and add a check.
		 */
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}
8267d02f605SLeon Romanovsky 
827bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
828bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
829bf3c5a93SLeon Romanovsky {
830bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
831bf3c5a93SLeon Romanovsky 	struct ib_device *device;
832bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
833bf3c5a93SLeon Romanovsky 	u32 index;
834bf3c5a93SLeon Romanovsky 	int ret;
835bf3c5a93SLeon Romanovsky 
836bf3c5a93SLeon Romanovsky 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
837bf3c5a93SLeon Romanovsky 			  nldev_policy, extack);
838bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
839bf3c5a93SLeon Romanovsky 		return -EINVAL;
840bf3c5a93SLeon Romanovsky 
841bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
84237eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
843bf3c5a93SLeon Romanovsky 	if (!device)
844bf3c5a93SLeon Romanovsky 		return -EINVAL;
845bf3c5a93SLeon Romanovsky 
846bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
847f34727a1SDan Carpenter 	if (!msg) {
848f34727a1SDan Carpenter 		ret = -ENOMEM;
849bf3c5a93SLeon Romanovsky 		goto err;
850f34727a1SDan Carpenter 	}
851bf3c5a93SLeon Romanovsky 
852bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
853bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
854bf3c5a93SLeon Romanovsky 			0, 0);
855bf3c5a93SLeon Romanovsky 
856bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
857bf3c5a93SLeon Romanovsky 	if (ret)
858bf3c5a93SLeon Romanovsky 		goto err_free;
859bf3c5a93SLeon Romanovsky 
860bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
86101b67117SParav Pandit 	ib_device_put(device);
862bf3c5a93SLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
863bf3c5a93SLeon Romanovsky 
864bf3c5a93SLeon Romanovsky err_free:
865bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
866bf3c5a93SLeon Romanovsky err:
86701b67117SParav Pandit 	ib_device_put(device);
868bf3c5a93SLeon Romanovsky 	return ret;
869bf3c5a93SLeon Romanovsky }
870bf3c5a93SLeon Romanovsky 
871bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
872bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
873bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
874bf3c5a93SLeon Romanovsky 				 unsigned int idx)
875bf3c5a93SLeon Romanovsky {
876bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
877bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
878bf3c5a93SLeon Romanovsky 
879bf3c5a93SLeon Romanovsky 	if (idx < start)
880bf3c5a93SLeon Romanovsky 		return 0;
881bf3c5a93SLeon Romanovsky 
882bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
883bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
884bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
885bf3c5a93SLeon Romanovsky 
886bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
887bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
888bf3c5a93SLeon Romanovsky 		goto out;
889bf3c5a93SLeon Romanovsky 	}
890bf3c5a93SLeon Romanovsky 
891bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
892bf3c5a93SLeon Romanovsky 
893bf3c5a93SLeon Romanovsky 	idx++;
894bf3c5a93SLeon Romanovsky 
895bf3c5a93SLeon Romanovsky out:
896bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
897bf3c5a93SLeon Romanovsky 	return skb->len;
898bf3c5a93SLeon Romanovsky }
899bf3c5a93SLeon Romanovsky 
/* RDMA_NLDEV_CMD_RES_GET dumpit: one resource summary per device. */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
905bf3c5a93SLeon Romanovsky 
/*
 * Dispatch descriptor for one restrack resource type, used by
 * res_get_common_doit()/res_get_common_dumpit().
 */
struct nldev_fill_res_entry {
	/* Emits the per-object attributes for one resource instance. */
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	/* Nest attribute holding the table of entries in a dump. */
	enum rdma_nldev_attr nldev_attr;
	/* Netlink command used for replies of this resource type. */
	enum rdma_nldev_command nldev_cmd;
	/* NLDEV_* flag bits, see enum nldev_res_flags below. */
	u8 flags;
	/* Nest attribute wrapping a single entry. */
	u32 entry;
	/* Attribute carrying the object id (QPN, CQN, ...) in a doit. */
	u32 id;
};

enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0, /* resource is per-device, not per-port */
};
919d12ff624SSteve Wise 
/* One dispatch descriptor per supported restrack resource type. */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
};
960d12ff624SSteve Wise 
9618be565e6SLeon Romanovsky static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res)
9628be565e6SLeon Romanovsky {
9638be565e6SLeon Romanovsky 	/*
9648be565e6SLeon Romanovsky 	 * 1. Kern resources should be visible in init name space only
9658be565e6SLeon Romanovsky 	 * 2. Present only resources visible in the current namespace
9668be565e6SLeon Romanovsky 	 */
9678be565e6SLeon Romanovsky 	if (rdma_is_kernel_res(res))
9688be565e6SLeon Romanovsky 		return task_active_pid_ns(current) == &init_pid_ns;
9698be565e6SLeon Romanovsky 	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
9708be565e6SLeon Romanovsky }
9718be565e6SLeon Romanovsky 
972c5dfe0eaSLeon Romanovsky static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
973c5dfe0eaSLeon Romanovsky 			       struct netlink_ext_ack *extack,
974c5dfe0eaSLeon Romanovsky 			       enum rdma_restrack_type res_type)
975c5dfe0eaSLeon Romanovsky {
976c5dfe0eaSLeon Romanovsky 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
977c5dfe0eaSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
978c5dfe0eaSLeon Romanovsky 	struct rdma_restrack_entry *res;
979c5dfe0eaSLeon Romanovsky 	struct ib_device *device;
980c5dfe0eaSLeon Romanovsky 	u32 index, id, port = 0;
981c5dfe0eaSLeon Romanovsky 	bool has_cap_net_admin;
982c5dfe0eaSLeon Romanovsky 	struct sk_buff *msg;
983c5dfe0eaSLeon Romanovsky 	int ret;
984c5dfe0eaSLeon Romanovsky 
985c5dfe0eaSLeon Romanovsky 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
986c5dfe0eaSLeon Romanovsky 			  nldev_policy, extack);
987c5dfe0eaSLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
988c5dfe0eaSLeon Romanovsky 		return -EINVAL;
989c5dfe0eaSLeon Romanovsky 
990c5dfe0eaSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
99137eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
992c5dfe0eaSLeon Romanovsky 	if (!device)
993c5dfe0eaSLeon Romanovsky 		return -EINVAL;
994c5dfe0eaSLeon Romanovsky 
995c5dfe0eaSLeon Romanovsky 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
996c5dfe0eaSLeon Romanovsky 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
997c5dfe0eaSLeon Romanovsky 		if (!rdma_is_port_valid(device, port)) {
998c5dfe0eaSLeon Romanovsky 			ret = -EINVAL;
999c5dfe0eaSLeon Romanovsky 			goto err;
1000c5dfe0eaSLeon Romanovsky 		}
1001c5dfe0eaSLeon Romanovsky 	}
1002c5dfe0eaSLeon Romanovsky 
1003c5dfe0eaSLeon Romanovsky 	if ((port && fe->flags & NLDEV_PER_DEV) ||
1004c5dfe0eaSLeon Romanovsky 	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
1005c5dfe0eaSLeon Romanovsky 		ret = -EINVAL;
1006c5dfe0eaSLeon Romanovsky 		goto err;
1007c5dfe0eaSLeon Romanovsky 	}
1008c5dfe0eaSLeon Romanovsky 
1009c5dfe0eaSLeon Romanovsky 	id = nla_get_u32(tb[fe->id]);
1010c5dfe0eaSLeon Romanovsky 	res = rdma_restrack_get_byid(device, res_type, id);
1011c5dfe0eaSLeon Romanovsky 	if (IS_ERR(res)) {
1012c5dfe0eaSLeon Romanovsky 		ret = PTR_ERR(res);
1013c5dfe0eaSLeon Romanovsky 		goto err;
1014c5dfe0eaSLeon Romanovsky 	}
1015c5dfe0eaSLeon Romanovsky 
1016c5dfe0eaSLeon Romanovsky 	if (!is_visible_in_pid_ns(res)) {
1017c5dfe0eaSLeon Romanovsky 		ret = -ENOENT;
1018c5dfe0eaSLeon Romanovsky 		goto err_get;
1019c5dfe0eaSLeon Romanovsky 	}
1020c5dfe0eaSLeon Romanovsky 
1021c5dfe0eaSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1022c5dfe0eaSLeon Romanovsky 	if (!msg) {
1023c5dfe0eaSLeon Romanovsky 		ret = -ENOMEM;
1024c5dfe0eaSLeon Romanovsky 		goto err;
1025c5dfe0eaSLeon Romanovsky 	}
1026c5dfe0eaSLeon Romanovsky 
1027c5dfe0eaSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1028c5dfe0eaSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
1029c5dfe0eaSLeon Romanovsky 			0, 0);
1030c5dfe0eaSLeon Romanovsky 
1031c5dfe0eaSLeon Romanovsky 	if (fill_nldev_handle(msg, device)) {
1032c5dfe0eaSLeon Romanovsky 		ret = -EMSGSIZE;
1033c5dfe0eaSLeon Romanovsky 		goto err_free;
1034c5dfe0eaSLeon Romanovsky 	}
1035c5dfe0eaSLeon Romanovsky 
1036c5dfe0eaSLeon Romanovsky 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1037c5dfe0eaSLeon Romanovsky 	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
1038c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1039c5dfe0eaSLeon Romanovsky 	if (ret)
1040c5dfe0eaSLeon Romanovsky 		goto err_free;
1041c5dfe0eaSLeon Romanovsky 
1042c5dfe0eaSLeon Romanovsky 	nlmsg_end(msg, nlh);
1043c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1044c5dfe0eaSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
1045c5dfe0eaSLeon Romanovsky 
1046c5dfe0eaSLeon Romanovsky err_free:
1047c5dfe0eaSLeon Romanovsky 	nlmsg_free(msg);
1048c5dfe0eaSLeon Romanovsky err_get:
1049c5dfe0eaSLeon Romanovsky 	rdma_restrack_put(res);
1050c5dfe0eaSLeon Romanovsky err:
1051c5dfe0eaSLeon Romanovsky 	ib_device_put(device);
1052c5dfe0eaSLeon Romanovsky 	return ret;
1053c5dfe0eaSLeon Romanovsky }
1054c5dfe0eaSLeon Romanovsky 
/*
 * Common dumpit for RDMA_NLDEV_CMD_RES_*_GET: walk the restrack XArray
 * of @res_type on the device named by DEV_INDEX and nest one entry per
 * visible object.  Dump progress is kept in cb->args[0].
 *
 * Locking: the XArray lock is dropped around fe->fill_res_func() (the
 * object is pinned via rdma_restrack_get() first) and re-taken before
 * the next iteration via the "again:" label.
 */
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * if it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	/*
	 * NOTE(review): nlmsg_put() can return NULL when the skb is
	 * nearly full mid-dump — confirm whether a check is needed here.
	 */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if the skip ahead is something common this loop should
	 * use xas_for_each & xas_pause to optimize, we can have a lot of
	 * objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		if (!is_visible_in_pid_ns(res))
			continue;

		/* Skip already-dumped entries; pin the rest before unlocking. */
		if (idx < start || !rdma_restrack_get(res))
			goto next;

		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;
			/* -EAGAIN: retry this object on the next iteration. */
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}
1187b5fa635aSLeon Romanovsky 
1188f732e713SLeon Romanovsky #define RES_GET_FUNCS(name, type)                                              \
1189f732e713SLeon Romanovsky 	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
1190f732e713SLeon Romanovsky 						 struct netlink_callback *cb)  \
1191f732e713SLeon Romanovsky 	{                                                                      \
1192f732e713SLeon Romanovsky 		return res_get_common_dumpit(skb, cb, type);                   \
1193c5dfe0eaSLeon Romanovsky 	}                                                                      \
1194c5dfe0eaSLeon Romanovsky 	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
1195c5dfe0eaSLeon Romanovsky 					       struct nlmsghdr *nlh,           \
1196c5dfe0eaSLeon Romanovsky 					       struct netlink_ext_ack *extack) \
1197c5dfe0eaSLeon Romanovsky 	{                                                                      \
1198c5dfe0eaSLeon Romanovsky 		return res_get_common_doit(skb, nlh, extack, type);            \
1199d12ff624SSteve Wise 	}
1200d12ff624SSteve Wise 
1201f732e713SLeon Romanovsky RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
1202f732e713SLeon Romanovsky RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
1203f732e713SLeon Romanovsky RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
1204f732e713SLeon Romanovsky RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
1205f732e713SLeon Romanovsky RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
120629cf1351SSteve Wise 
12073856ec4bSSteve Wise static LIST_HEAD(link_ops);
12083856ec4bSSteve Wise static DECLARE_RWSEM(link_ops_rwsem);
12093856ec4bSSteve Wise 
12103856ec4bSSteve Wise static const struct rdma_link_ops *link_ops_get(const char *type)
12113856ec4bSSteve Wise {
12123856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12133856ec4bSSteve Wise 
12143856ec4bSSteve Wise 	list_for_each_entry(ops, &link_ops, list) {
12153856ec4bSSteve Wise 		if (!strcmp(ops->type, type))
12163856ec4bSSteve Wise 			goto out;
12173856ec4bSSteve Wise 	}
12183856ec4bSSteve Wise 	ops = NULL;
12193856ec4bSSteve Wise out:
12203856ec4bSSteve Wise 	return ops;
12213856ec4bSSteve Wise }
12223856ec4bSSteve Wise 
12233856ec4bSSteve Wise void rdma_link_register(struct rdma_link_ops *ops)
12243856ec4bSSteve Wise {
12253856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
1226afc1990eSDan Carpenter 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
12273856ec4bSSteve Wise 		goto out;
12283856ec4bSSteve Wise 	list_add(&ops->list, &link_ops);
12293856ec4bSSteve Wise out:
12303856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
12313856ec4bSSteve Wise }
12323856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_register);
12333856ec4bSSteve Wise 
12343856ec4bSSteve Wise void rdma_link_unregister(struct rdma_link_ops *ops)
12353856ec4bSSteve Wise {
12363856ec4bSSteve Wise 	down_write(&link_ops_rwsem);
12373856ec4bSSteve Wise 	list_del(&ops->list);
12383856ec4bSSteve Wise 	up_write(&link_ops_rwsem);
12393856ec4bSSteve Wise }
12403856ec4bSSteve Wise EXPORT_SYMBOL(rdma_link_unregister);
12413856ec4bSSteve Wise 
12423856ec4bSSteve Wise static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
12433856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
12443856ec4bSSteve Wise {
12453856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
12463856ec4bSSteve Wise 	char ibdev_name[IB_DEVICE_NAME_MAX];
12473856ec4bSSteve Wise 	const struct rdma_link_ops *ops;
12483856ec4bSSteve Wise 	char ndev_name[IFNAMSIZ];
12493856ec4bSSteve Wise 	struct net_device *ndev;
12503856ec4bSSteve Wise 	char type[IFNAMSIZ];
12513856ec4bSSteve Wise 	int err;
12523856ec4bSSteve Wise 
12533856ec4bSSteve Wise 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
12543856ec4bSSteve Wise 			  nldev_policy, extack);
12553856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
12563856ec4bSSteve Wise 	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
12573856ec4bSSteve Wise 		return -EINVAL;
12583856ec4bSSteve Wise 
12593856ec4bSSteve Wise 	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
12603856ec4bSSteve Wise 		    sizeof(ibdev_name));
12613856ec4bSSteve Wise 	if (strchr(ibdev_name, '%'))
12623856ec4bSSteve Wise 		return -EINVAL;
12633856ec4bSSteve Wise 
12643856ec4bSSteve Wise 	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
12653856ec4bSSteve Wise 	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
12663856ec4bSSteve Wise 		    sizeof(ndev_name));
12673856ec4bSSteve Wise 
12683856ec4bSSteve Wise 	ndev = dev_get_by_name(&init_net, ndev_name);
12693856ec4bSSteve Wise 	if (!ndev)
12703856ec4bSSteve Wise 		return -ENODEV;
12713856ec4bSSteve Wise 
12723856ec4bSSteve Wise 	down_read(&link_ops_rwsem);
12733856ec4bSSteve Wise 	ops = link_ops_get(type);
12743856ec4bSSteve Wise #ifdef CONFIG_MODULES
12753856ec4bSSteve Wise 	if (!ops) {
12763856ec4bSSteve Wise 		up_read(&link_ops_rwsem);
12773856ec4bSSteve Wise 		request_module("rdma-link-%s", type);
12783856ec4bSSteve Wise 		down_read(&link_ops_rwsem);
12793856ec4bSSteve Wise 		ops = link_ops_get(type);
12803856ec4bSSteve Wise 	}
12813856ec4bSSteve Wise #endif
12823856ec4bSSteve Wise 	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
12833856ec4bSSteve Wise 	up_read(&link_ops_rwsem);
12843856ec4bSSteve Wise 	dev_put(ndev);
12853856ec4bSSteve Wise 
12863856ec4bSSteve Wise 	return err;
12873856ec4bSSteve Wise }
12883856ec4bSSteve Wise 
12893856ec4bSSteve Wise static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
12903856ec4bSSteve Wise 			  struct netlink_ext_ack *extack)
12913856ec4bSSteve Wise {
12923856ec4bSSteve Wise 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
12933856ec4bSSteve Wise 	struct ib_device *device;
12943856ec4bSSteve Wise 	u32 index;
12953856ec4bSSteve Wise 	int err;
12963856ec4bSSteve Wise 
12973856ec4bSSteve Wise 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
12983856ec4bSSteve Wise 			  nldev_policy, extack);
12993856ec4bSSteve Wise 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
13003856ec4bSSteve Wise 		return -EINVAL;
13013856ec4bSSteve Wise 
13023856ec4bSSteve Wise 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
130337eeab55SParav Pandit 	device = ib_device_get_by_index(sock_net(skb->sk), index);
13043856ec4bSSteve Wise 	if (!device)
13053856ec4bSSteve Wise 		return -EINVAL;
13063856ec4bSSteve Wise 
13073856ec4bSSteve Wise 	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
13083856ec4bSSteve Wise 		ib_device_put(device);
13093856ec4bSSteve Wise 		return -EINVAL;
13103856ec4bSSteve Wise 	}
13113856ec4bSSteve Wise 
13123856ec4bSSteve Wise 	ib_unregister_device_and_put(device);
13133856ec4bSSteve Wise 	return 0;
13143856ec4bSSteve Wise }
13153856ec4bSSteve Wise 
1316cb7e0e13SParav Pandit static int nldev_get_sys_get_dumpit(struct sk_buff *skb,
1317cb7e0e13SParav Pandit 				    struct netlink_callback *cb)
1318cb7e0e13SParav Pandit {
1319cb7e0e13SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1320cb7e0e13SParav Pandit 	struct nlmsghdr *nlh;
1321cb7e0e13SParav Pandit 	int err;
1322cb7e0e13SParav Pandit 
1323cb7e0e13SParav Pandit 	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1324cb7e0e13SParav Pandit 			  nldev_policy, NULL);
1325cb7e0e13SParav Pandit 	if (err)
1326cb7e0e13SParav Pandit 		return err;
1327cb7e0e13SParav Pandit 
1328cb7e0e13SParav Pandit 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1329cb7e0e13SParav Pandit 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1330cb7e0e13SParav Pandit 					 RDMA_NLDEV_CMD_SYS_GET),
1331cb7e0e13SParav Pandit 			0, 0);
1332cb7e0e13SParav Pandit 
1333cb7e0e13SParav Pandit 	err = nla_put_u8(skb, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1334cb7e0e13SParav Pandit 			 (u8)ib_devices_shared_netns);
1335cb7e0e13SParav Pandit 	if (err) {
1336cb7e0e13SParav Pandit 		nlmsg_cancel(skb, nlh);
1337cb7e0e13SParav Pandit 		return err;
1338cb7e0e13SParav Pandit 	}
1339cb7e0e13SParav Pandit 
1340cb7e0e13SParav Pandit 	nlmsg_end(skb, nlh);
1341cb7e0e13SParav Pandit 	return skb->len;
1342cb7e0e13SParav Pandit }
1343cb7e0e13SParav Pandit 
13442b34c558SParav Pandit static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
13452b34c558SParav Pandit 				  struct netlink_ext_ack *extack)
13462b34c558SParav Pandit {
13472b34c558SParav Pandit 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
13482b34c558SParav Pandit 	u8 enable;
13492b34c558SParav Pandit 	int err;
13502b34c558SParav Pandit 
13512b34c558SParav Pandit 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
13522b34c558SParav Pandit 			  nldev_policy, extack);
13532b34c558SParav Pandit 	if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
13542b34c558SParav Pandit 		return -EINVAL;
13552b34c558SParav Pandit 
13562b34c558SParav Pandit 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
13572b34c558SParav Pandit 	/* Only 0 and 1 are supported */
13582b34c558SParav Pandit 	if (enable > 1)
13592b34c558SParav Pandit 		return -EINVAL;
13602b34c558SParav Pandit 
13612b34c558SParav Pandit 	err = rdma_compatdev_set(enable);
13622b34c558SParav Pandit 	return err;
13632b34c558SParav Pandit }
13642b34c558SParav Pandit 
1365d0e312feSLeon Romanovsky static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
1366b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_CMD_GET] = {
1367e5c9469eSLeon Romanovsky 		.doit = nldev_get_doit,
1368b4c598a6SLeon Romanovsky 		.dump = nldev_get_dumpit,
1369b4c598a6SLeon Romanovsky 	},
137005d940d3SLeon Romanovsky 	[RDMA_NLDEV_CMD_SET] = {
137105d940d3SLeon Romanovsky 		.doit = nldev_set_doit,
137205d940d3SLeon Romanovsky 		.flags = RDMA_NL_ADMIN_PERM,
137305d940d3SLeon Romanovsky 	},
13743856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_NEWLINK] = {
13753856ec4bSSteve Wise 		.doit = nldev_newlink,
13763856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
13773856ec4bSSteve Wise 	},
13783856ec4bSSteve Wise 	[RDMA_NLDEV_CMD_DELLINK] = {
13793856ec4bSSteve Wise 		.doit = nldev_dellink,
13803856ec4bSSteve Wise 		.flags = RDMA_NL_ADMIN_PERM,
13813856ec4bSSteve Wise 	},
13827d02f605SLeon Romanovsky 	[RDMA_NLDEV_CMD_PORT_GET] = {
1383c3f66f7bSLeon Romanovsky 		.doit = nldev_port_get_doit,
13847d02f605SLeon Romanovsky 		.dump = nldev_port_get_dumpit,
13857d02f605SLeon Romanovsky 	},
1386bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_GET] = {
1387bf3c5a93SLeon Romanovsky 		.doit = nldev_res_get_doit,
1388bf3c5a93SLeon Romanovsky 		.dump = nldev_res_get_dumpit,
1389bf3c5a93SLeon Romanovsky 	},
1390b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
1391c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_qp_doit,
1392b5fa635aSLeon Romanovsky 		.dump = nldev_res_get_qp_dumpit,
1393b5fa635aSLeon Romanovsky 	},
139400313983SSteve Wise 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
1395c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cm_id_doit,
139600313983SSteve Wise 		.dump = nldev_res_get_cm_id_dumpit,
139700313983SSteve Wise 	},
1398a34fc089SSteve Wise 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
1399c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_cq_doit,
1400a34fc089SSteve Wise 		.dump = nldev_res_get_cq_dumpit,
1401a34fc089SSteve Wise 	},
1402fccec5b8SSteve Wise 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
1403c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_mr_doit,
1404fccec5b8SSteve Wise 		.dump = nldev_res_get_mr_dumpit,
1405fccec5b8SSteve Wise 	},
140629cf1351SSteve Wise 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
1407c5dfe0eaSLeon Romanovsky 		.doit = nldev_res_get_pd_doit,
140829cf1351SSteve Wise 		.dump = nldev_res_get_pd_dumpit,
140929cf1351SSteve Wise 	},
1410cb7e0e13SParav Pandit 	[RDMA_NLDEV_CMD_SYS_GET] = {
1411cb7e0e13SParav Pandit 		.dump = nldev_get_sys_get_dumpit,
1412cb7e0e13SParav Pandit 	},
14132b34c558SParav Pandit 	[RDMA_NLDEV_CMD_SYS_SET] = {
14142b34c558SParav Pandit 		.doit = nldev_set_sys_set_doit,
14152b34c558SParav Pandit 		.flags = RDMA_NL_ADMIN_PERM,
14162b34c558SParav Pandit 	},
1417b4c598a6SLeon Romanovsky };
1418b4c598a6SLeon Romanovsky 
14196c80b41aSLeon Romanovsky void __init nldev_init(void)
14206c80b41aSLeon Romanovsky {
1421b4c598a6SLeon Romanovsky 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
14226c80b41aSLeon Romanovsky }
14236c80b41aSLeon Romanovsky 
14246c80b41aSLeon Romanovsky void __exit nldev_exit(void)
14256c80b41aSLeon Romanovsky {
14266c80b41aSLeon Romanovsky 	rdma_nl_unregister(RDMA_NL_NLDEV);
14276c80b41aSLeon Romanovsky }
1428e3bf14bdSJason Gunthorpe 
1429e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
1430