xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision 73937e8a)
16c80b41aSLeon Romanovsky /*
26c80b41aSLeon Romanovsky  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
36c80b41aSLeon Romanovsky  *
46c80b41aSLeon Romanovsky  * Redistribution and use in source and binary forms, with or without
56c80b41aSLeon Romanovsky  * modification, are permitted provided that the following conditions are met:
66c80b41aSLeon Romanovsky  *
76c80b41aSLeon Romanovsky  * 1. Redistributions of source code must retain the above copyright
86c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer.
96c80b41aSLeon Romanovsky  * 2. Redistributions in binary form must reproduce the above copyright
106c80b41aSLeon Romanovsky  *    notice, this list of conditions and the following disclaimer in the
116c80b41aSLeon Romanovsky  *    documentation and/or other materials provided with the distribution.
126c80b41aSLeon Romanovsky  * 3. Neither the names of the copyright holders nor the names of its
136c80b41aSLeon Romanovsky  *    contributors may be used to endorse or promote products derived from
146c80b41aSLeon Romanovsky  *    this software without specific prior written permission.
156c80b41aSLeon Romanovsky  *
166c80b41aSLeon Romanovsky  * Alternatively, this software may be distributed under the terms of the
176c80b41aSLeon Romanovsky  * GNU General Public License ("GPL") version 2 as published by the Free
186c80b41aSLeon Romanovsky  * Software Foundation.
196c80b41aSLeon Romanovsky  *
206c80b41aSLeon Romanovsky  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
216c80b41aSLeon Romanovsky  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
226c80b41aSLeon Romanovsky  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
236c80b41aSLeon Romanovsky  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
246c80b41aSLeon Romanovsky  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
256c80b41aSLeon Romanovsky  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
266c80b41aSLeon Romanovsky  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
276c80b41aSLeon Romanovsky  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
286c80b41aSLeon Romanovsky  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
296c80b41aSLeon Romanovsky  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
306c80b41aSLeon Romanovsky  * POSSIBILITY OF SUCH DAMAGE.
316c80b41aSLeon Romanovsky  */
326c80b41aSLeon Romanovsky 
33e3bf14bdSJason Gunthorpe #include <linux/module.h>
34bf3c5a93SLeon Romanovsky #include <linux/pid.h>
35bf3c5a93SLeon Romanovsky #include <linux/pid_namespace.h>
36b4c598a6SLeon Romanovsky #include <net/netlink.h>
3700313983SSteve Wise #include <rdma/rdma_cm.h>
386c80b41aSLeon Romanovsky #include <rdma/rdma_netlink.h>
396c80b41aSLeon Romanovsky 
406c80b41aSLeon Romanovsky #include "core_priv.h"
4100313983SSteve Wise #include "cma_priv.h"
426c80b41aSLeon Romanovsky 
43b4c598a6SLeon Romanovsky static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
44b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
45b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
46b4c598a6SLeon Romanovsky 					    .len = IB_DEVICE_NAME_MAX - 1},
47b4c598a6SLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
488621a7e3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
498621a7e3SLeon Romanovsky 					    .len = IB_FW_VERSION_NAME_MAX - 1},
501aaff896SLeon Romanovsky 	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
511aaff896SLeon Romanovsky 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
5212026fbbSLeon Romanovsky 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
5380a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
5480a06dd3SLeon Romanovsky 	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
5534840feaSLeon Romanovsky 	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
565654e49dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
575654e49dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
581bb77b8cSLeon Romanovsky 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
59bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
60bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
61bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
62bf3c5a93SLeon Romanovsky 					     .len = 16 },
63bf3c5a93SLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
64b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
65b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
66b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
67b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
68b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
69b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
70b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
71b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
72b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
73b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
74b5fa635aSLeon Romanovsky 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
75b5fa635aSLeon Romanovsky 						    .len = TASK_COMM_LEN },
7600313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
7700313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
7800313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
7900313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
8000313983SSteve Wise 			.len = sizeof(struct __kernel_sockaddr_storage) },
8100313983SSteve Wise 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
8200313983SSteve Wise 			.len = sizeof(struct __kernel_sockaddr_storage) },
83a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
84a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
85a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
86a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
87a34fc089SSteve Wise 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
88fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
89fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
90fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
91fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
92fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
93fccec5b8SSteve Wise 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
9429cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
9529cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
9629cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
9729cf1351SSteve Wise 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
985b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
995b2cc79dSLeon Romanovsky 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
1005b2cc79dSLeon Romanovsky 						    .len = IFNAMSIZ },
101da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
102da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
103da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
104da5c8507SSteve Wise 				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
105da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
106da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
107da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
108da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
109da5c8507SSteve Wise 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
110b4c598a6SLeon Romanovsky };
111b4c598a6SLeon Romanovsky 
/*
 * Emit a driver attribute's name string and, unless the print type is
 * UNSPEC, a hint telling userspace how to render the value that follows.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */
11273937e8aSSteve Wise static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
11373937e8aSSteve Wise 				      enum rdma_nldev_print_type print_type)
11473937e8aSSteve Wise {
11573937e8aSSteve Wise 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
11673937e8aSSteve Wise 		return -EMSGSIZE;
11773937e8aSSteve Wise 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
11873937e8aSSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
11973937e8aSSteve Wise 		return -EMSGSIZE;
12073937e8aSSteve Wise 
12173937e8aSSteve Wise 	return 0;
12273937e8aSSteve Wise }
12373937e8aSSteve Wise 
/*
 * Common helper for the exported u32 putters: name + print-type hint,
 * then the 32-bit value itself. Returns 0 or -EMSGSIZE.
 */
12473937e8aSSteve Wise static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
12573937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
12673937e8aSSteve Wise 				   u32 value)
12773937e8aSSteve Wise {
12873937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
12973937e8aSSteve Wise 		return -EMSGSIZE;
13073937e8aSSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
13173937e8aSSteve Wise 		return -EMSGSIZE;
13273937e8aSSteve Wise 
13373937e8aSSteve Wise 	return 0;
13473937e8aSSteve Wise }
13573937e8aSSteve Wise 
/*
 * Common helper for the exported u64 putters. Uses the 64-bit-aligned
 * variant with RDMA_NLDEV_ATTR_PAD so the payload stays aligned on
 * architectures that need it. Returns 0 or -EMSGSIZE.
 */
13673937e8aSSteve Wise static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
13773937e8aSSteve Wise 				   enum rdma_nldev_print_type print_type,
13873937e8aSSteve Wise 				   u64 value)
13973937e8aSSteve Wise {
14073937e8aSSteve Wise 	if (put_driver_name_print_type(msg, name, print_type))
14173937e8aSSteve Wise 		return -EMSGSIZE;
14273937e8aSSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
14373937e8aSSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
14473937e8aSSteve Wise 		return -EMSGSIZE;
14573937e8aSSteve Wise 
14673937e8aSSteve Wise 	return 0;
14773937e8aSSteve Wise }
14873937e8aSSteve Wise 
/* Exported: put a named driver u32 attribute, rendered as decimal. */
14973937e8aSSteve Wise int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
15073937e8aSSteve Wise {
15173937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
15273937e8aSSteve Wise 				       value);
15373937e8aSSteve Wise }
15473937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32);
15573937e8aSSteve Wise 
/* Exported: put a named driver u32 attribute, rendered as hexadecimal. */
15673937e8aSSteve Wise int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
15773937e8aSSteve Wise 			       u32 value)
15873937e8aSSteve Wise {
15973937e8aSSteve Wise 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
16073937e8aSSteve Wise 				       value);
16173937e8aSSteve Wise }
16273937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
16373937e8aSSteve Wise 
/* Exported: put a named driver u64 attribute, rendered as decimal. */
16473937e8aSSteve Wise int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
16573937e8aSSteve Wise {
16673937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
16773937e8aSSteve Wise 				       value);
16873937e8aSSteve Wise }
16973937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64);
17073937e8aSSteve Wise 
/* Exported: put a named driver u64 attribute, rendered as hexadecimal. */
17173937e8aSSteve Wise int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
17273937e8aSSteve Wise {
17373937e8aSSteve Wise 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
17473937e8aSSteve Wise 				       value);
17573937e8aSSteve Wise }
17673937e8aSSteve Wise EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
17773937e8aSSteve Wise 
/*
 * Put the device "handle" pair (index + name) that identifies an ib_device
 * in every nldev reply. Returns 0 or -EMSGSIZE.
 */
178c2409810SLeon Romanovsky static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
179b4c598a6SLeon Romanovsky {
180b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
181b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
182b4c598a6SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
183b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
184c2409810SLeon Romanovsky 
185c2409810SLeon Romanovsky 	return 0;
186c2409810SLeon Romanovsky }
187c2409810SLeon Romanovsky 
/*
 * Fill the per-device attributes of an RDMA_NLDEV_CMD_GET reply: handle,
 * port count, capability flags, optional FW version, GUIDs and node type.
 * Returns 0 or -EMSGSIZE.
 */
188c2409810SLeon Romanovsky static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
189c2409810SLeon Romanovsky {
190c2409810SLeon Romanovsky 	char fw[IB_FW_VERSION_NAME_MAX];
191c2409810SLeon Romanovsky 
192c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
193c2409810SLeon Romanovsky 		return -EMSGSIZE;
194c2409810SLeon Romanovsky 
	/* rdma_end_port() doubles as the number of physical ports here */
195b4c598a6SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
196b4c598a6SLeon Romanovsky 		return -EMSGSIZE;
197ac505253SLeon Romanovsky 
	/* The attribute is u64; make sure the in-kernel field still matches */
198ac505253SLeon Romanovsky 	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
199ac505253SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
20025a0ad85SSteve Wise 			      device->attrs.device_cap_flags,
20125a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
202ac505253SLeon Romanovsky 		return -EMSGSIZE;
203ac505253SLeon Romanovsky 
2048621a7e3SLeon Romanovsky 	ib_get_device_fw_str(device, fw);
2055b2cc79dSLeon Romanovsky 	/* Device without FW has strlen(fw) = 0 */
2068621a7e3SLeon Romanovsky 	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
2078621a7e3SLeon Romanovsky 		return -EMSGSIZE;
2088621a7e3SLeon Romanovsky 
	/* GUIDs are stored big-endian; convert before putting as u64 */
2091aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
21025a0ad85SSteve Wise 			      be64_to_cpu(device->node_guid),
21125a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2121aaff896SLeon Romanovsky 		return -EMSGSIZE;
2131aaff896SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
21425a0ad85SSteve Wise 			      be64_to_cpu(device->attrs.sys_image_guid),
21525a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
2161aaff896SLeon Romanovsky 		return -EMSGSIZE;
2171bb77b8cSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
2181bb77b8cSLeon Romanovsky 		return -EMSGSIZE;
219b4c598a6SLeon Romanovsky 	return 0;
220b4c598a6SLeon Romanovsky }
221b4c598a6SLeon Romanovsky 
/*
 * Fill the per-port attributes of a port GET reply: device handle, port
 * index, queried port attributes (cap flags, IB-only LID/SM LID/LMC and
 * subnet prefix), state, and — when the associated netdev belongs to the
 * caller's network namespace — its ifindex and name.
 * Returns 0, -EMSGSIZE, or the error from ib_query_port().
 */
2227d02f605SLeon Romanovsky static int fill_port_info(struct sk_buff *msg,
2235b2cc79dSLeon Romanovsky 			  struct ib_device *device, u32 port,
2245b2cc79dSLeon Romanovsky 			  const struct net *net)
2257d02f605SLeon Romanovsky {
2265b2cc79dSLeon Romanovsky 	struct net_device *netdev = NULL;
227ac505253SLeon Romanovsky 	struct ib_port_attr attr;
228ac505253SLeon Romanovsky 	int ret;
229ac505253SLeon Romanovsky 
230c2409810SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
2317d02f605SLeon Romanovsky 		return -EMSGSIZE;
232c2409810SLeon Romanovsky 
2337d02f605SLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
2347d02f605SLeon Romanovsky 		return -EMSGSIZE;
235ac505253SLeon Romanovsky 
	/* ret is 0 from here on unless a netlink put below overwrites it */
236ac505253SLeon Romanovsky 	ret = ib_query_port(device, port, &attr);
237ac505253SLeon Romanovsky 	if (ret)
238ac505253SLeon Romanovsky 		return ret;
239ac505253SLeon Romanovsky 
240ac505253SLeon Romanovsky 	BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
241ac505253SLeon Romanovsky 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
24225a0ad85SSteve Wise 			      (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
243ac505253SLeon Romanovsky 		return -EMSGSIZE;
	/* Subnet prefix, LID, SM LID and LMC are meaningful only on IB links */
24412026fbbSLeon Romanovsky 	if (rdma_protocol_ib(device, port) &&
24512026fbbSLeon Romanovsky 	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
24625a0ad85SSteve Wise 			      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
24712026fbbSLeon Romanovsky 		return -EMSGSIZE;
24880a06dd3SLeon Romanovsky 	if (rdma_protocol_ib(device, port)) {
24980a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
25080a06dd3SLeon Romanovsky 			return -EMSGSIZE;
25180a06dd3SLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
25280a06dd3SLeon Romanovsky 			return -EMSGSIZE;
25334840feaSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
25434840feaSLeon Romanovsky 			return -EMSGSIZE;
25580a06dd3SLeon Romanovsky 	}
2565654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
2575654e49dSLeon Romanovsky 		return -EMSGSIZE;
2585654e49dSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
2595654e49dSLeon Romanovsky 		return -EMSGSIZE;
2605b2cc79dSLeon Romanovsky 
	/* get_netdev is optional and returns a reference we must drop below */
2615b2cc79dSLeon Romanovsky 	if (device->get_netdev)
2625b2cc79dSLeon Romanovsky 		netdev = device->get_netdev(device, port);
2635b2cc79dSLeon Romanovsky 
	/* Only expose the netdev to callers in its own network namespace */
2645b2cc79dSLeon Romanovsky 	if (netdev && net_eq(dev_net(netdev), net)) {
2655b2cc79dSLeon Romanovsky 		ret = nla_put_u32(msg,
2665b2cc79dSLeon Romanovsky 				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
2675b2cc79dSLeon Romanovsky 		if (ret)
2685b2cc79dSLeon Romanovsky 			goto out;
2695b2cc79dSLeon Romanovsky 		ret = nla_put_string(msg,
2705b2cc79dSLeon Romanovsky 				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
2715b2cc79dSLeon Romanovsky 	}
2725b2cc79dSLeon Romanovsky 
2735b2cc79dSLeon Romanovsky out:
2745b2cc79dSLeon Romanovsky 	if (netdev)
2755b2cc79dSLeon Romanovsky 		dev_put(netdev);
2765b2cc79dSLeon Romanovsky 	return ret;
2777d02f605SLeon Romanovsky }
2787d02f605SLeon Romanovsky 
/*
 * Append one nested summary entry {name, current count} to the resource
 * summary table. On failure the partially written nest is cancelled so the
 * message stays well-formed. Returns 0 or -EMSGSIZE.
 */
279bf3c5a93SLeon Romanovsky static int fill_res_info_entry(struct sk_buff *msg,
280bf3c5a93SLeon Romanovsky 			       const char *name, u64 curr)
281bf3c5a93SLeon Romanovsky {
282bf3c5a93SLeon Romanovsky 	struct nlattr *entry_attr;
283bf3c5a93SLeon Romanovsky 
284bf3c5a93SLeon Romanovsky 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
285bf3c5a93SLeon Romanovsky 	if (!entry_attr)
286bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
287bf3c5a93SLeon Romanovsky 
288bf3c5a93SLeon Romanovsky 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
289bf3c5a93SLeon Romanovsky 		goto err;
29025a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
29125a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
292bf3c5a93SLeon Romanovsky 		goto err;
293bf3c5a93SLeon Romanovsky 
294bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, entry_attr);
295bf3c5a93SLeon Romanovsky 	return 0;
296bf3c5a93SLeon Romanovsky 
297bf3c5a93SLeon Romanovsky err:
298bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
299bf3c5a93SLeon Romanovsky 	return -EMSGSIZE;
300bf3c5a93SLeon Romanovsky }
301bf3c5a93SLeon Romanovsky 
/*
 * Build the per-device resource summary: a nested table with one
 * {name, count} entry per tracked restrack type, counted in the caller's
 * PID namespace. Returns 0, -EMSGSIZE, or the entry helper's error.
 */
302bf3c5a93SLeon Romanovsky static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
303bf3c5a93SLeon Romanovsky {
	/* Index matches enum rdma_restrack_type; gaps stay NULL and are skipped */
304bf3c5a93SLeon Romanovsky 	static const char * const names[RDMA_RESTRACK_MAX] = {
305bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_PD] = "pd",
306bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_CQ] = "cq",
307bf3c5a93SLeon Romanovsky 		[RDMA_RESTRACK_QP] = "qp",
30800313983SSteve Wise 		[RDMA_RESTRACK_CM_ID] = "cm_id",
309fccec5b8SSteve Wise 		[RDMA_RESTRACK_MR] = "mr",
310bf3c5a93SLeon Romanovsky 	};
311bf3c5a93SLeon Romanovsky 
312bf3c5a93SLeon Romanovsky 	struct rdma_restrack_root *res = &device->res;
313bf3c5a93SLeon Romanovsky 	struct nlattr *table_attr;
314bf3c5a93SLeon Romanovsky 	int ret, i, curr;
315bf3c5a93SLeon Romanovsky 
316bf3c5a93SLeon Romanovsky 	if (fill_nldev_handle(msg, device))
317bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
318bf3c5a93SLeon Romanovsky 
319bf3c5a93SLeon Romanovsky 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
320bf3c5a93SLeon Romanovsky 	if (!table_attr)
321bf3c5a93SLeon Romanovsky 		return -EMSGSIZE;
322bf3c5a93SLeon Romanovsky 
323bf3c5a93SLeon Romanovsky 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
324bf3c5a93SLeon Romanovsky 		if (!names[i])
325bf3c5a93SLeon Romanovsky 			continue;
326bf3c5a93SLeon Romanovsky 		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
327bf3c5a93SLeon Romanovsky 		ret = fill_res_info_entry(msg, names[i], curr);
328bf3c5a93SLeon Romanovsky 		if (ret)
329bf3c5a93SLeon Romanovsky 			goto err;
330bf3c5a93SLeon Romanovsky 	}
331bf3c5a93SLeon Romanovsky 
332bf3c5a93SLeon Romanovsky 	nla_nest_end(msg, table_attr);
333bf3c5a93SLeon Romanovsky 	return 0;
334bf3c5a93SLeon Romanovsky 
335bf3c5a93SLeon Romanovsky err:
	/* Drop the whole half-built table so the skb remains parseable */
336bf3c5a93SLeon Romanovsky 	nla_nest_cancel(msg, table_attr);
337bf3c5a93SLeon Romanovsky 	return ret;
338bf3c5a93SLeon Romanovsky }
339bf3c5a93SLeon Romanovsky 
/*
 * Identify the owner of a tracked resource: kernel-owned resources get the
 * kernel consumer name, user-owned ones get the owning task's PID (as seen
 * from the reader's PID namespace). Returns 0 or -EMSGSIZE.
 */
34000313983SSteve Wise static int fill_res_name_pid(struct sk_buff *msg,
34100313983SSteve Wise 			     struct rdma_restrack_entry *res)
34200313983SSteve Wise {
34300313983SSteve Wise 	/*
34400313983SSteve Wise 	 * For user resources, userspace should read /proc/PID/comm to get
34500313983SSteve Wise 	 * the name of the task that owns the resource.
34600313983SSteve Wise 	 */
34700313983SSteve Wise 	if (rdma_is_kernel_res(res)) {
34800313983SSteve Wise 		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
34900313983SSteve Wise 		    res->kern_name))
35000313983SSteve Wise 			return -EMSGSIZE;
35100313983SSteve Wise 	} else {
35200313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
35300313983SSteve Wise 		    task_pid_vnr(res->task)))
35400313983SSteve Wise 			return -EMSGSIZE;
35500313983SSteve Wise 	}
35600313983SSteve Wise 	return 0;
35700313983SSteve Wise }
35800313983SSteve Wise 
/*
 * Dump one QP as a nested RES_QP_ENTRY attribute. A non-zero @port acts as
 * a filter: QPs bound to a different port are silently skipped (return 0).
 * Returns 0 on success/skip, -EMSGSIZE on overflow, or ib_query_qp()'s
 * error.
 */
359d12ff624SSteve Wise static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
360d12ff624SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
361b5fa635aSLeon Romanovsky {
362d12ff624SSteve Wise 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
363da5c8507SSteve Wise 	struct rdma_restrack_root *resroot = &qp->device->res;
364b5fa635aSLeon Romanovsky 	struct ib_qp_init_attr qp_init_attr;
365b5fa635aSLeon Romanovsky 	struct nlattr *entry_attr;
366b5fa635aSLeon Romanovsky 	struct ib_qp_attr qp_attr;
367b5fa635aSLeon Romanovsky 	int ret;
368b5fa635aSLeon Romanovsky 
369b5fa635aSLeon Romanovsky 	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
370b5fa635aSLeon Romanovsky 	if (ret)
371b5fa635aSLeon Romanovsky 		return ret;
372b5fa635aSLeon Romanovsky 
373b5fa635aSLeon Romanovsky 	if (port && port != qp_attr.port_num)
374b5fa635aSLeon Romanovsky 		return 0;
375b5fa635aSLeon Romanovsky 
376b5fa635aSLeon Romanovsky 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
377b5fa635aSLeon Romanovsky 	if (!entry_attr)
378b5fa635aSLeon Romanovsky 		goto out;
379b5fa635aSLeon Romanovsky 
380b5fa635aSLeon Romanovsky 	/* In create_qp() port is not set yet */
381b5fa635aSLeon Romanovsky 	if (qp_attr.port_num &&
382b5fa635aSLeon Romanovsky 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
383b5fa635aSLeon Romanovsky 		goto err;
384b5fa635aSLeon Romanovsky 
385b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
386b5fa635aSLeon Romanovsky 		goto err;
	/* Remote QPN / RQ PSN only exist for connected transports */
387b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
388b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
389b5fa635aSLeon Romanovsky 				qp_attr.dest_qp_num))
390b5fa635aSLeon Romanovsky 			goto err;
391b5fa635aSLeon Romanovsky 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
392b5fa635aSLeon Romanovsky 				qp_attr.rq_psn))
393b5fa635aSLeon Romanovsky 			goto err;
394b5fa635aSLeon Romanovsky 	}
395b5fa635aSLeon Romanovsky 
396b5fa635aSLeon Romanovsky 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
397b5fa635aSLeon Romanovsky 		goto err;
398b5fa635aSLeon Romanovsky 
	/* Path migration state applies to connected and XRC QP types only */
399b5fa635aSLeon Romanovsky 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
400b5fa635aSLeon Romanovsky 	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
401b5fa635aSLeon Romanovsky 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
402b5fa635aSLeon Romanovsky 			       qp_attr.path_mig_state))
403b5fa635aSLeon Romanovsky 			goto err;
404b5fa635aSLeon Romanovsky 	}
405b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
406b5fa635aSLeon Romanovsky 		goto err;
407b5fa635aSLeon Romanovsky 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
408b5fa635aSLeon Romanovsky 		goto err;
409b5fa635aSLeon Romanovsky 
41000313983SSteve Wise 	if (fill_res_name_pid(msg, res))
411b5fa635aSLeon Romanovsky 		goto err;
41200313983SSteve Wise 
	/* Let the driver append its own nested RDMA_NLDEV_ATTR_DRIVER data */
413da5c8507SSteve Wise 	if (resroot->fill_res_entry(msg, res))
414da5c8507SSteve Wise 		goto err;
415da5c8507SSteve Wise 
41600313983SSteve Wise 	nla_nest_end(msg, entry_attr);
41700313983SSteve Wise 	return 0;
41800313983SSteve Wise 
41900313983SSteve Wise err:
42000313983SSteve Wise 	nla_nest_cancel(msg, entry_attr);
42100313983SSteve Wise out:
42200313983SSteve Wise 	return -EMSGSIZE;
42300313983SSteve Wise }
42400313983SSteve Wise 
/*
 * Dump one RDMA CM ID as a nested RES_CM_ID_ENTRY attribute. A non-zero
 * @port filters out IDs bound to other ports (return 0 = skipped).
 * Addresses are emitted only once a family has been set on the ID.
 * Returns 0 or -EMSGSIZE.
 */
42500313983SSteve Wise static int fill_res_cm_id_entry(struct sk_buff *msg,
42600313983SSteve Wise 				struct netlink_callback *cb,
42700313983SSteve Wise 				struct rdma_restrack_entry *res, uint32_t port)
42800313983SSteve Wise {
42900313983SSteve Wise 	struct rdma_id_private *id_priv =
43000313983SSteve Wise 				container_of(res, struct rdma_id_private, res);
431da5c8507SSteve Wise 	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
43200313983SSteve Wise 	struct rdma_cm_id *cm_id = &id_priv->id;
43300313983SSteve Wise 	struct nlattr *entry_attr;
43400313983SSteve Wise 
43500313983SSteve Wise 	if (port && port != cm_id->port_num)
43600313983SSteve Wise 		return 0;
43700313983SSteve Wise 
43800313983SSteve Wise 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
43900313983SSteve Wise 	if (!entry_attr)
44000313983SSteve Wise 		goto out;
44100313983SSteve Wise 
	/* port_num is 0 until the ID is bound to a device port */
44200313983SSteve Wise 	if (cm_id->port_num &&
44300313983SSteve Wise 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
44400313983SSteve Wise 		goto err;
44500313983SSteve Wise 
	/* QP info only exists once a QP has been associated with the ID */
44600313983SSteve Wise 	if (id_priv->qp_num) {
44700313983SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
44800313983SSteve Wise 			goto err;
44900313983SSteve Wise 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
450b5fa635aSLeon Romanovsky 			goto err;
451b5fa635aSLeon Romanovsky 	}
452b5fa635aSLeon Romanovsky 
45300313983SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
45400313983SSteve Wise 		goto err;
45500313983SSteve Wise 
45600313983SSteve Wise 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
45700313983SSteve Wise 		goto err;
45800313983SSteve Wise 
45900313983SSteve Wise 	if (cm_id->route.addr.src_addr.ss_family &&
46000313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
46100313983SSteve Wise 		    sizeof(cm_id->route.addr.src_addr),
46200313983SSteve Wise 		    &cm_id->route.addr.src_addr))
46300313983SSteve Wise 		goto err;
46400313983SSteve Wise 	if (cm_id->route.addr.dst_addr.ss_family &&
46500313983SSteve Wise 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
46600313983SSteve Wise 		    sizeof(cm_id->route.addr.dst_addr),
46700313983SSteve Wise 		    &cm_id->route.addr.dst_addr))
46800313983SSteve Wise 		goto err;
46900313983SSteve Wise 
47000313983SSteve Wise 	if (fill_res_name_pid(msg, res))
47100313983SSteve Wise 		goto err;
47200313983SSteve Wise 
	/* Let the driver append its own nested RDMA_NLDEV_ATTR_DRIVER data */
473da5c8507SSteve Wise 	if (resroot->fill_res_entry(msg, res))
474da5c8507SSteve Wise 		goto err;
475da5c8507SSteve Wise 
476b5fa635aSLeon Romanovsky 	nla_nest_end(msg, entry_attr);
477b5fa635aSLeon Romanovsky 	return 0;
478b5fa635aSLeon Romanovsky 
479b5fa635aSLeon Romanovsky err:
480b5fa635aSLeon Romanovsky 	nla_nest_cancel(msg, entry_attr);
481b5fa635aSLeon Romanovsky out:
482b5fa635aSLeon Romanovsky 	return -EMSGSIZE;
483b5fa635aSLeon Romanovsky }
484b5fa635aSLeon Romanovsky 
/*
 * Dump one CQ as a nested RES_CQ_ENTRY attribute: depth, reference count,
 * and (for kernel CQs only) the poll context. The @port argument is unused
 * for CQs since they are not port-bound. Returns 0 or -EMSGSIZE.
 */
485a34fc089SSteve Wise static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
486a34fc089SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
487a34fc089SSteve Wise {
488a34fc089SSteve Wise 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
489da5c8507SSteve Wise 	struct rdma_restrack_root *resroot = &cq->device->res;
490a34fc089SSteve Wise 	struct nlattr *entry_attr;
491a34fc089SSteve Wise 
492a34fc089SSteve Wise 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
493a34fc089SSteve Wise 	if (!entry_attr)
494a34fc089SSteve Wise 		goto out;
495a34fc089SSteve Wise 
496a34fc089SSteve Wise 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
497a34fc089SSteve Wise 		goto err;
498a34fc089SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
49925a0ad85SSteve Wise 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
500a34fc089SSteve Wise 		goto err;
501a34fc089SSteve Wise 
502a34fc089SSteve Wise 	/* Poll context is only valid for kernel CQs */
503a34fc089SSteve Wise 	if (rdma_is_kernel_res(res) &&
504a34fc089SSteve Wise 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
505a34fc089SSteve Wise 		goto err;
506a34fc089SSteve Wise 
507a34fc089SSteve Wise 	if (fill_res_name_pid(msg, res))
508a34fc089SSteve Wise 		goto err;
509a34fc089SSteve Wise 
	/* Let the driver append its own nested RDMA_NLDEV_ATTR_DRIVER data */
510da5c8507SSteve Wise 	if (resroot->fill_res_entry(msg, res))
511da5c8507SSteve Wise 		goto err;
512da5c8507SSteve Wise 
513a34fc089SSteve Wise 	nla_nest_end(msg, entry_attr);
514a34fc089SSteve Wise 	return 0;
515a34fc089SSteve Wise 
516a34fc089SSteve Wise err:
517a34fc089SSteve Wise 	nla_nest_cancel(msg, entry_attr);
518a34fc089SSteve Wise out:
519a34fc089SSteve Wise 	return -EMSGSIZE;
520a34fc089SSteve Wise }
521a34fc089SSteve Wise 
/*
 * Dump one MR as a nested RES_MR_ENTRY attribute. The rkey, lkey and IOVA
 * are security sensitive, so they are exposed only to CAP_NET_ADMIN
 * callers; the length and owner are visible to everyone.
 * Returns 0 or -EMSGSIZE.
 */
522fccec5b8SSteve Wise static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
523fccec5b8SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
524fccec5b8SSteve Wise {
525fccec5b8SSteve Wise 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
526da5c8507SSteve Wise 	struct rdma_restrack_root *resroot = &mr->pd->device->res;
527fccec5b8SSteve Wise 	struct nlattr *entry_attr;
528fccec5b8SSteve Wise 
529fccec5b8SSteve Wise 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
530fccec5b8SSteve Wise 	if (!entry_attr)
531fccec5b8SSteve Wise 		goto out;
532fccec5b8SSteve Wise 
533fccec5b8SSteve Wise 	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
534fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
535fccec5b8SSteve Wise 			goto err;
536fccec5b8SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
537fccec5b8SSteve Wise 			goto err;
538fccec5b8SSteve Wise 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA,
53925a0ad85SSteve Wise 				      mr->iova, RDMA_NLDEV_ATTR_PAD))
540fccec5b8SSteve Wise 			goto err;
541fccec5b8SSteve Wise 	}
542fccec5b8SSteve Wise 
54325a0ad85SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
54425a0ad85SSteve Wise 			      RDMA_NLDEV_ATTR_PAD))
545fccec5b8SSteve Wise 		goto err;
546fccec5b8SSteve Wise 
547fccec5b8SSteve Wise 	if (fill_res_name_pid(msg, res))
548fccec5b8SSteve Wise 		goto err;
549fccec5b8SSteve Wise 
	/* Let the driver append its own nested RDMA_NLDEV_ATTR_DRIVER data */
550da5c8507SSteve Wise 	if (resroot->fill_res_entry(msg, res))
551da5c8507SSteve Wise 		goto err;
552da5c8507SSteve Wise 
553fccec5b8SSteve Wise 	nla_nest_end(msg, entry_attr);
554fccec5b8SSteve Wise 	return 0;
555fccec5b8SSteve Wise 
556fccec5b8SSteve Wise err:
557fccec5b8SSteve Wise 	nla_nest_cancel(msg, entry_attr);
558fccec5b8SSteve Wise out:
559fccec5b8SSteve Wise 	return -EMSGSIZE;
560fccec5b8SSteve Wise }
561fccec5b8SSteve Wise 
/*
 * Dump one PD as a nested RES_PD_ENTRY attribute. The local DMA lkey and
 * the unsafe global rkey are security sensitive, so — like the MR keys in
 * fill_res_mr_entry() — they are exposed only to CAP_NET_ADMIN callers;
 * the use count and owner are visible to everyone.
 * Returns 0 or -EMSGSIZE.
 *
 * Fix versus the previous version: the unsafe global rkey was additionally
 * emitted a second time outside the capability check, which both duplicated
 * the attribute for privileged readers and leaked the rkey to unprivileged
 * ones. The unconditional copy is removed.
 */
56229cf1351SSteve Wise static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
56329cf1351SSteve Wise 			     struct rdma_restrack_entry *res, uint32_t port)
56429cf1351SSteve Wise {
56529cf1351SSteve Wise 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
566da5c8507SSteve Wise 	struct rdma_restrack_root *resroot = &pd->device->res;
56729cf1351SSteve Wise 	struct nlattr *entry_attr;
56829cf1351SSteve Wise 
56929cf1351SSteve Wise 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
57029cf1351SSteve Wise 	if (!entry_attr)
57129cf1351SSteve Wise 		goto out;
57229cf1351SSteve Wise 
57329cf1351SSteve Wise 	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
57429cf1351SSteve Wise 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
57529cf1351SSteve Wise 				pd->local_dma_lkey))
57629cf1351SSteve Wise 			goto err;
57729cf1351SSteve Wise 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
57829cf1351SSteve Wise 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
57929cf1351SSteve Wise 				pd->unsafe_global_rkey))
58029cf1351SSteve Wise 			goto err;
58129cf1351SSteve Wise 	}
58229cf1351SSteve Wise 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
58325a0ad85SSteve Wise 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
58429cf1351SSteve Wise 		goto err;
58929cf1351SSteve Wise 
59029cf1351SSteve Wise 	if (fill_res_name_pid(msg, res))
59129cf1351SSteve Wise 		goto err;
59229cf1351SSteve Wise 
	/* Let the driver append its own nested RDMA_NLDEV_ATTR_DRIVER data */
593da5c8507SSteve Wise 	if (resroot->fill_res_entry(msg, res))
594da5c8507SSteve Wise 		goto err;
595da5c8507SSteve Wise 
59629cf1351SSteve Wise 	nla_nest_end(msg, entry_attr);
59729cf1351SSteve Wise 	return 0;
59829cf1351SSteve Wise 
59929cf1351SSteve Wise err:
60029cf1351SSteve Wise 	nla_nest_cancel(msg, entry_attr);
60129cf1351SSteve Wise out:
60229cf1351SSteve Wise 	return -EMSGSIZE;
60329cf1351SSteve Wise }
60429cf1351SSteve Wise 
605e5c9469eSLeon Romanovsky static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
606e5c9469eSLeon Romanovsky 			  struct netlink_ext_ack *extack)
607e5c9469eSLeon Romanovsky {
608e5c9469eSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
609e5c9469eSLeon Romanovsky 	struct ib_device *device;
610e5c9469eSLeon Romanovsky 	struct sk_buff *msg;
611e5c9469eSLeon Romanovsky 	u32 index;
612e5c9469eSLeon Romanovsky 	int err;
613e5c9469eSLeon Romanovsky 
614e5c9469eSLeon Romanovsky 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
615e5c9469eSLeon Romanovsky 			  nldev_policy, extack);
616e5c9469eSLeon Romanovsky 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
617e5c9469eSLeon Romanovsky 		return -EINVAL;
618e5c9469eSLeon Romanovsky 
619e5c9469eSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
620e5c9469eSLeon Romanovsky 
621f8978bd9SLeon Romanovsky 	device = ib_device_get_by_index(index);
622e5c9469eSLeon Romanovsky 	if (!device)
623e5c9469eSLeon Romanovsky 		return -EINVAL;
624e5c9469eSLeon Romanovsky 
625e5c9469eSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
626f8978bd9SLeon Romanovsky 	if (!msg) {
627f8978bd9SLeon Romanovsky 		err = -ENOMEM;
628f8978bd9SLeon Romanovsky 		goto err;
629f8978bd9SLeon Romanovsky 	}
630e5c9469eSLeon Romanovsky 
631e5c9469eSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
632e5c9469eSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
633e5c9469eSLeon Romanovsky 			0, 0);
634e5c9469eSLeon Romanovsky 
635e5c9469eSLeon Romanovsky 	err = fill_dev_info(msg, device);
636f8978bd9SLeon Romanovsky 	if (err)
637f8978bd9SLeon Romanovsky 		goto err_free;
638e5c9469eSLeon Romanovsky 
639e5c9469eSLeon Romanovsky 	nlmsg_end(msg, nlh);
640e5c9469eSLeon Romanovsky 
641f8978bd9SLeon Romanovsky 	put_device(&device->dev);
642e5c9469eSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
643f8978bd9SLeon Romanovsky 
644f8978bd9SLeon Romanovsky err_free:
645f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
646f8978bd9SLeon Romanovsky err:
647f8978bd9SLeon Romanovsky 	put_device(&device->dev);
648f8978bd9SLeon Romanovsky 	return err;
649e5c9469eSLeon Romanovsky }
650e5c9469eSLeon Romanovsky 
651b4c598a6SLeon Romanovsky static int _nldev_get_dumpit(struct ib_device *device,
652b4c598a6SLeon Romanovsky 			     struct sk_buff *skb,
653b4c598a6SLeon Romanovsky 			     struct netlink_callback *cb,
654b4c598a6SLeon Romanovsky 			     unsigned int idx)
655b4c598a6SLeon Romanovsky {
656b4c598a6SLeon Romanovsky 	int start = cb->args[0];
657b4c598a6SLeon Romanovsky 	struct nlmsghdr *nlh;
658b4c598a6SLeon Romanovsky 
659b4c598a6SLeon Romanovsky 	if (idx < start)
660b4c598a6SLeon Romanovsky 		return 0;
661b4c598a6SLeon Romanovsky 
662b4c598a6SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
663b4c598a6SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
664b4c598a6SLeon Romanovsky 			0, NLM_F_MULTI);
665b4c598a6SLeon Romanovsky 
666b4c598a6SLeon Romanovsky 	if (fill_dev_info(skb, device)) {
667b4c598a6SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
668b4c598a6SLeon Romanovsky 		goto out;
669b4c598a6SLeon Romanovsky 	}
670b4c598a6SLeon Romanovsky 
671b4c598a6SLeon Romanovsky 	nlmsg_end(skb, nlh);
672b4c598a6SLeon Romanovsky 
673b4c598a6SLeon Romanovsky 	idx++;
674b4c598a6SLeon Romanovsky 
675b4c598a6SLeon Romanovsky out:	cb->args[0] = idx;
676b4c598a6SLeon Romanovsky 	return skb->len;
677b4c598a6SLeon Romanovsky }
678b4c598a6SLeon Romanovsky 
/*
 * Dump one RDMA_NLDEV_CMD_GET record for every registered ib_device,
 * delegating per-device formatting to _nldev_get_dumpit().
 */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's lists_rwsem
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
687b4c598a6SLeon Romanovsky 
688c3f66f7bSLeon Romanovsky static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
689c3f66f7bSLeon Romanovsky 			       struct netlink_ext_ack *extack)
690c3f66f7bSLeon Romanovsky {
691c3f66f7bSLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
692c3f66f7bSLeon Romanovsky 	struct ib_device *device;
693c3f66f7bSLeon Romanovsky 	struct sk_buff *msg;
694c3f66f7bSLeon Romanovsky 	u32 index;
695c3f66f7bSLeon Romanovsky 	u32 port;
696c3f66f7bSLeon Romanovsky 	int err;
697c3f66f7bSLeon Romanovsky 
698c3f66f7bSLeon Romanovsky 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
699c3f66f7bSLeon Romanovsky 			  nldev_policy, extack);
700287683d0SLeon Romanovsky 	if (err ||
701287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
702287683d0SLeon Romanovsky 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
703c3f66f7bSLeon Romanovsky 		return -EINVAL;
704c3f66f7bSLeon Romanovsky 
705c3f66f7bSLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
706f8978bd9SLeon Romanovsky 	device = ib_device_get_by_index(index);
707c3f66f7bSLeon Romanovsky 	if (!device)
708c3f66f7bSLeon Romanovsky 		return -EINVAL;
709c3f66f7bSLeon Romanovsky 
710c3f66f7bSLeon Romanovsky 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
711f8978bd9SLeon Romanovsky 	if (!rdma_is_port_valid(device, port)) {
712f8978bd9SLeon Romanovsky 		err = -EINVAL;
713f8978bd9SLeon Romanovsky 		goto err;
714f8978bd9SLeon Romanovsky 	}
715c3f66f7bSLeon Romanovsky 
716c3f66f7bSLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
717f8978bd9SLeon Romanovsky 	if (!msg) {
718f8978bd9SLeon Romanovsky 		err = -ENOMEM;
719f8978bd9SLeon Romanovsky 		goto err;
720f8978bd9SLeon Romanovsky 	}
721c3f66f7bSLeon Romanovsky 
722c3f66f7bSLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
723c3f66f7bSLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
724c3f66f7bSLeon Romanovsky 			0, 0);
725c3f66f7bSLeon Romanovsky 
7265b2cc79dSLeon Romanovsky 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
727f8978bd9SLeon Romanovsky 	if (err)
728f8978bd9SLeon Romanovsky 		goto err_free;
729c3f66f7bSLeon Romanovsky 
730c3f66f7bSLeon Romanovsky 	nlmsg_end(msg, nlh);
731f8978bd9SLeon Romanovsky 	put_device(&device->dev);
732c3f66f7bSLeon Romanovsky 
733c3f66f7bSLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
734f8978bd9SLeon Romanovsky 
735f8978bd9SLeon Romanovsky err_free:
736f8978bd9SLeon Romanovsky 	nlmsg_free(msg);
737f8978bd9SLeon Romanovsky err:
738f8978bd9SLeon Romanovsky 	put_device(&device->dev);
739f8978bd9SLeon Romanovsky 	return err;
740c3f66f7bSLeon Romanovsky }
741c3f66f7bSLeon Romanovsky 
/*
 * Dump RDMA_NLDEV_CMD_PORT_GET records for every port of one device,
 * identified by the mandatory RDMA_NLDEV_ATTR_DEV_INDEX attribute.
 * Ports already delivered in a previous pass (below cb->args[0]) are
 * skipped, so the dump resumes where it left off.
 */
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Holds a device reference until the "out" label. */
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information from specific
		 * index. This specific index is taken from the netlink
		 * messages request sent by user and it is available
		 * in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field and it causes
		 * to return everything.
		 *
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		/* On failure (skb full) stop; next pass resumes at idx. */
		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}
7997d02f605SLeon Romanovsky 
800bf3c5a93SLeon Romanovsky static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
801bf3c5a93SLeon Romanovsky 			      struct netlink_ext_ack *extack)
802bf3c5a93SLeon Romanovsky {
803bf3c5a93SLeon Romanovsky 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
804bf3c5a93SLeon Romanovsky 	struct ib_device *device;
805bf3c5a93SLeon Romanovsky 	struct sk_buff *msg;
806bf3c5a93SLeon Romanovsky 	u32 index;
807bf3c5a93SLeon Romanovsky 	int ret;
808bf3c5a93SLeon Romanovsky 
809bf3c5a93SLeon Romanovsky 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
810bf3c5a93SLeon Romanovsky 			  nldev_policy, extack);
811bf3c5a93SLeon Romanovsky 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
812bf3c5a93SLeon Romanovsky 		return -EINVAL;
813bf3c5a93SLeon Romanovsky 
814bf3c5a93SLeon Romanovsky 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
815bf3c5a93SLeon Romanovsky 	device = ib_device_get_by_index(index);
816bf3c5a93SLeon Romanovsky 	if (!device)
817bf3c5a93SLeon Romanovsky 		return -EINVAL;
818bf3c5a93SLeon Romanovsky 
819bf3c5a93SLeon Romanovsky 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
820f34727a1SDan Carpenter 	if (!msg) {
821f34727a1SDan Carpenter 		ret = -ENOMEM;
822bf3c5a93SLeon Romanovsky 		goto err;
823f34727a1SDan Carpenter 	}
824bf3c5a93SLeon Romanovsky 
825bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
826bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
827bf3c5a93SLeon Romanovsky 			0, 0);
828bf3c5a93SLeon Romanovsky 
829bf3c5a93SLeon Romanovsky 	ret = fill_res_info(msg, device);
830bf3c5a93SLeon Romanovsky 	if (ret)
831bf3c5a93SLeon Romanovsky 		goto err_free;
832bf3c5a93SLeon Romanovsky 
833bf3c5a93SLeon Romanovsky 	nlmsg_end(msg, nlh);
834bf3c5a93SLeon Romanovsky 	put_device(&device->dev);
835bf3c5a93SLeon Romanovsky 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
836bf3c5a93SLeon Romanovsky 
837bf3c5a93SLeon Romanovsky err_free:
838bf3c5a93SLeon Romanovsky 	nlmsg_free(msg);
839bf3c5a93SLeon Romanovsky err:
840bf3c5a93SLeon Romanovsky 	put_device(&device->dev);
841bf3c5a93SLeon Romanovsky 	return ret;
842bf3c5a93SLeon Romanovsky }
843bf3c5a93SLeon Romanovsky 
844bf3c5a93SLeon Romanovsky static int _nldev_res_get_dumpit(struct ib_device *device,
845bf3c5a93SLeon Romanovsky 				 struct sk_buff *skb,
846bf3c5a93SLeon Romanovsky 				 struct netlink_callback *cb,
847bf3c5a93SLeon Romanovsky 				 unsigned int idx)
848bf3c5a93SLeon Romanovsky {
849bf3c5a93SLeon Romanovsky 	int start = cb->args[0];
850bf3c5a93SLeon Romanovsky 	struct nlmsghdr *nlh;
851bf3c5a93SLeon Romanovsky 
852bf3c5a93SLeon Romanovsky 	if (idx < start)
853bf3c5a93SLeon Romanovsky 		return 0;
854bf3c5a93SLeon Romanovsky 
855bf3c5a93SLeon Romanovsky 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
856bf3c5a93SLeon Romanovsky 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
857bf3c5a93SLeon Romanovsky 			0, NLM_F_MULTI);
858bf3c5a93SLeon Romanovsky 
859bf3c5a93SLeon Romanovsky 	if (fill_res_info(skb, device)) {
860bf3c5a93SLeon Romanovsky 		nlmsg_cancel(skb, nlh);
861bf3c5a93SLeon Romanovsky 		goto out;
862bf3c5a93SLeon Romanovsky 	}
863bf3c5a93SLeon Romanovsky 
864bf3c5a93SLeon Romanovsky 	nlmsg_end(skb, nlh);
865bf3c5a93SLeon Romanovsky 
866bf3c5a93SLeon Romanovsky 	idx++;
867bf3c5a93SLeon Romanovsky 
868bf3c5a93SLeon Romanovsky out:
869bf3c5a93SLeon Romanovsky 	cb->args[0] = idx;
870bf3c5a93SLeon Romanovsky 	return skb->len;
871bf3c5a93SLeon Romanovsky }
872bf3c5a93SLeon Romanovsky 
/*
 * Dump one RDMA_NLDEV_CMD_RES_GET summary record per registered
 * ib_device, delegating per-device formatting to _nldev_res_get_dumpit().
 */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
878bf3c5a93SLeon Romanovsky 
/*
 * Per-restrack-type dispatch record used by res_get_common_dumpit():
 * ties a resource type to the netlink command of its dump replies, the
 * nested table attribute that wraps the entries, and the function that
 * formats a single tracked entry.
 */
struct nldev_fill_res_entry {
	/* Formats one restrack entry into @msg; returns 0 or -errno. */
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	/* Nested attribute that wraps all entries of this type. */
	enum rdma_nldev_attr nldev_attr;
	/* Netlink command used for the dump reply headers. */
	enum rdma_nldev_command nldev_cmd;
};
885d12ff624SSteve Wise 
/*
 * Dispatch table indexed by enum rdma_restrack_type; res_get_common_dumpit()
 * looks up the requested resource type here.  Types without an entry
 * (zero-initialized slots) are not dumpable.
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};
913d12ff624SSteve Wise 
/*
 * Common dumpit for all per-type resource dumps (QP/CM_ID/CQ/MR/PD):
 * walks the device's restrack hash bucket for @res_type and emits one
 * nested entry per visible resource, using the fill_entries[] dispatch
 * table for type-specific formatting.
 *
 * Requires RDMA_NLDEV_ATTR_DEV_INDEX; RDMA_NLDEV_ATTR_PORT_INDEX is
 * optional and, when present, is passed through to the per-entry fill
 * function.  Resumes from cb->args[0] across dump passes.
 *
 * Locking: device->res.rwsem is held for read while walking the hash,
 * but dropped around each fill callback; a per-entry restrack reference
 * keeps the entry alive across that window.
 */
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * if it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	/* Holds a device reference until one of the exit paths. */
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all QPs from that device
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	/* All entries of this type are nested inside one table attribute. */
	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kern resources should be visible in init
			 *    namspace only
			 * 2. Present only resources visible in the current
			 *    namespace
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * Resource is under release now, but we are not
			 * relesing lock now, so it will be released in
			 * our next pass, once we will get ->next pointer.
			 */
			goto next;

		filled = true;

		/* Drop the rwsem while formatting; the ref keeps res alive. */
		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Return resource back, but it won't be released till
		 * the &device->res.rwsem will be released for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark end of dumpit.
	 */
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	put_device(&device->dev);
	return ret;
}
1047b5fa635aSLeon Romanovsky 
/* Dump all tracked QPs of one device via the common restrack dumper. */
static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}
1053d12ff624SSteve Wise 
/* Dump all tracked CM IDs of one device via the common restrack dumper. */
static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}
105900313983SSteve Wise 
/* Dump all tracked CQs of one device via the common restrack dumper. */
static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}
1065a34fc089SSteve Wise 
/* Dump all tracked MRs of one device via the common restrack dumper. */
static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}
1071fccec5b8SSteve Wise 
/* Dump all tracked PDs of one device via the common restrack dumper. */
static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}
107729cf1351SSteve Wise 
/*
 * Operation table registered with the RDMA netlink core: maps each
 * RDMA_NLDEV_CMD_* to its doit (single request/reply) and/or dump
 * (multi-part) handler.
 */
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide identifier, while it is easy
		 * for the QPs (device index + port index + LQPN), it is not
		 * the case for the rest of resources (PD and CQ). Because it
		 * is better to provide similar interface for all resources,
		 * let's wait till we will have other resources implemented
		 * too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};
1117b4c598a6SLeon Romanovsky 
/* Register the nldev operation table with the RDMA netlink core. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
11226c80b41aSLeon Romanovsky 
/* Unregister the nldev operations on module teardown. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}
1127e3bf14bdSJason Gunthorpe 
1128e3bf14bdSJason Gunthorpe MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
1129