xref: /openbmc/linux/drivers/infiniband/core/nldev.c (revision 74ce1896)
1 /*
2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Neither the names of the copyright holders nor the names of its
13  *    contributors may be used to endorse or promote products derived from
14  *    this software without specific prior written permission.
15  *
16  * Alternatively, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") version 2 as published by the Free
18  * Software Foundation.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <linux/module.h>
34 #include <net/netlink.h>
35 #include <rdma/rdma_netlink.h>
36 
37 #include "core_priv.h"
38 
/*
 * Validation policy for attributes arriving on RDMA_NL_NLDEV requests.
 * String attributes are NUL-terminated and length-bounded; .len excludes
 * the terminating NUL, hence the "- 1" against the buffer-sized constants.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
};
56 
/*
 * fill_dev_info() - append per-device attributes to a netlink message.
 * @msg:    destination netlink skb (caller owns it and handles cleanup)
 * @device: IB device to describe
 *
 * Attribute order here is the on-the-wire layout seen by userspace.
 * Returns 0 on success or -EMSGSIZE when @msg runs out of tailroom.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;
	/* PORT_INDEX carries the number of ports (== highest port number). */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	/* Cap flags are emitted as u64; catch any future type change. */
	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags, 0))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) == 0; skip the attribute then. */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	/* GUIDs are stored big-endian; convert for the u64 attribute. */
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid), 0))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid), 0))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}
88 
89 static int fill_port_info(struct sk_buff *msg,
90 			  struct ib_device *device, u32 port)
91 {
92 	struct ib_port_attr attr;
93 	int ret;
94 
95 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
96 		return -EMSGSIZE;
97 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
98 		return -EMSGSIZE;
99 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
100 		return -EMSGSIZE;
101 
102 	ret = ib_query_port(device, port, &attr);
103 	if (ret)
104 		return ret;
105 
106 	BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
107 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
108 			      (u64)attr.port_cap_flags, 0))
109 		return -EMSGSIZE;
110 	if (rdma_protocol_ib(device, port) &&
111 	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
112 			      attr.subnet_prefix, 0))
113 		return -EMSGSIZE;
114 	if (rdma_protocol_ib(device, port)) {
115 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
116 			return -EMSGSIZE;
117 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
118 			return -EMSGSIZE;
119 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
120 			return -EMSGSIZE;
121 	}
122 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
123 		return -EMSGSIZE;
124 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
125 		return -EMSGSIZE;
126 	return 0;
127 }
128 
129 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
130 			  struct netlink_ext_ack *extack)
131 {
132 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
133 	struct ib_device *device;
134 	struct sk_buff *msg;
135 	u32 index;
136 	int err;
137 
138 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
139 			  nldev_policy, extack);
140 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
141 		return -EINVAL;
142 
143 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
144 
145 	device = __ib_device_get_by_index(index);
146 	if (!device)
147 		return -EINVAL;
148 
149 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
150 	if (!msg)
151 		return -ENOMEM;
152 
153 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
154 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
155 			0, 0);
156 
157 	err = fill_dev_info(msg, device);
158 	if (err) {
159 		nlmsg_free(msg);
160 		return err;
161 	}
162 
163 	nlmsg_end(msg, nlh);
164 
165 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
166 }
167 
168 static int _nldev_get_dumpit(struct ib_device *device,
169 			     struct sk_buff *skb,
170 			     struct netlink_callback *cb,
171 			     unsigned int idx)
172 {
173 	int start = cb->args[0];
174 	struct nlmsghdr *nlh;
175 
176 	if (idx < start)
177 		return 0;
178 
179 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
180 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
181 			0, NLM_F_MULTI);
182 
183 	if (fill_dev_info(skb, device)) {
184 		nlmsg_cancel(skb, nlh);
185 		goto out;
186 	}
187 
188 	nlmsg_end(skb, nlh);
189 
190 	idx++;
191 
192 out:	cb->args[0] = idx;
193 	return skb->len;
194 }
195 
/*
 * nldev_get_dumpit() - dump all RDMA devices (RDMA_NLDEV_CMD_GET dump).
 * Delegates per-device work to _nldev_get_dumpit().
 */
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take lock, because
	 * we are relying on ib_core's lists_rwsem
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
204 
205 static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
206 			       struct netlink_ext_ack *extack)
207 {
208 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
209 	struct ib_device *device;
210 	struct sk_buff *msg;
211 	u32 index;
212 	u32 port;
213 	int err;
214 
215 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
216 			  nldev_policy, extack);
217 	if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
218 		return -EINVAL;
219 
220 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
221 	device = __ib_device_get_by_index(index);
222 	if (!device)
223 		return -EINVAL;
224 
225 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
226 	if (!rdma_is_port_valid(device, port))
227 		return -EINVAL;
228 
229 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
230 	if (!msg)
231 		return -ENOMEM;
232 
233 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
234 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
235 			0, 0);
236 
237 	err = fill_port_info(msg, device, port);
238 	if (err) {
239 		nlmsg_free(msg);
240 		return err;
241 	}
242 
243 	nlmsg_end(msg, nlh);
244 
245 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
246 }
247 
248 static int nldev_port_get_dumpit(struct sk_buff *skb,
249 				 struct netlink_callback *cb)
250 {
251 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
252 	struct ib_device *device;
253 	int start = cb->args[0];
254 	struct nlmsghdr *nlh;
255 	u32 idx = 0;
256 	u32 ifindex;
257 	int err;
258 	u32 p;
259 
260 	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
261 			  nldev_policy, NULL);
262 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
263 		return -EINVAL;
264 
265 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
266 	device = __ib_device_get_by_index(ifindex);
267 	if (!device)
268 		return -EINVAL;
269 
270 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
271 		/*
272 		 * The dumpit function returns all information from specific
273 		 * index. This specific index is taken from the netlink
274 		 * messages request sent by user and it is available
275 		 * in cb->args[0].
276 		 *
277 		 * Usually, the user doesn't fill this field and it causes
278 		 * to return everything.
279 		 *
280 		 */
281 		if (idx < start) {
282 			idx++;
283 			continue;
284 		}
285 
286 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
287 				cb->nlh->nlmsg_seq,
288 				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
289 						 RDMA_NLDEV_CMD_PORT_GET),
290 				0, NLM_F_MULTI);
291 
292 		if (fill_port_info(skb, device, p)) {
293 			nlmsg_cancel(skb, nlh);
294 			goto out;
295 		}
296 		idx++;
297 		nlmsg_end(skb, nlh);
298 	}
299 
300 out:	cb->args[0] = idx;
301 	return skb->len;
302 }
303 
/*
 * Command dispatch table for the RDMA_NL_NLDEV netlink family:
 * .doit handles targeted requests, .dump handles NLM_F_DUMP iteration.
 */
static const struct rdma_nl_cbs nldev_cb_table[] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
};
314 
/* Register the nldev command table with the RDMA netlink core at boot. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
319 
/* Unregister the RDMA_NL_NLDEV family on module unload. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}
324 
325 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
326