/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

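/*
 * Every command in this file follows the same pattern: a zeroed
 * input/output buffer sized with MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES(),
 * filled with MLX5_SET()/MLX5_ADDR_OF(), executed through the firmware
 * command interface and parsed with MLX5_GET().  A non-zero vport
 * addresses another function's vport, in which case the other_vport
 * bit is set in the command header.
 */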
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	int err;
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return err;
}

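/*
 * mlx5_query_vport_state() - read the operational state of a vport.
 * @opmod selects which view is queried (NIC vport, e-switch vport or
 * uplink); @vport is only honoured when non-zero.  Command failures
 * are only logged, so a failed query reads back as 0 (down).
 *
 * Illustrative sketch of a caller (mdev and netdev are assumed names):
 *
 *	u8 state = mlx5_query_vport_state(mdev, 0, 0);
 *
 *	if (state)
 *		netif_carrier_on(netdev);
 *	else
 *		netif_carrier_off(netdev);
 */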
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

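/*
 * mlx5_modify_vport_admin_state() - set the administrative state of a
 * vport.  The op_mod/state encodings are those of the
 * MODIFY_VPORT_STATE command in mlx5_ifc.h; vport 0 is the caller's
 * own vport, anything else sets other_vport.
 */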
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

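/*
 * Shared helpers for the NIC vport accessors below.  The query fills a
 * caller-provided query_nic_vport_context_out buffer; the modify
 * expects a pre-filled modify_nic_vport_context_in buffer and only
 * stamps the opcode before executing it.
 */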
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

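/*
 * The permanent_address field (like every MAC in the firmware layouts)
 * is eight bytes wide with the 48-bit address in the last six bytes,
 * hence the +2 byte offset when copying to or from an ETH_ALEN sized
 * Linux address in the two helpers below.
 */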
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

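/*
 * MTU accessors for the caller's own NIC vport (vport 0).  The value
 * is the raw nic_vport_context.mtu field; any accounting for Ethernet
 * headers is left to the caller.
 *
 * Illustrative sketch (mdev and hw_mtu are assumed names):
 *
 *	err = mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
 *	if (!err)
 *		err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
 */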
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

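/*
 * mlx5_query_nic_vport_mac_list() - read the allowed UC or MC address
 * list of a vport.  On input *list_size is the capacity of @addr_list
 * (clamped to the log_max_current_{uc,mc}_list capability); on output
 * it is the number of entries actually returned.
 *
 * Illustrative sketch of a caller (dev and vport are assumed names):
 *
 *	u8 macs[8][ETH_ALEN];
 *	int n_macs = 8;
 *	int err;
 *
 *	err = mlx5_query_nic_vport_mac_list(dev, vport,
 *					    MLX5_NVPRT_LIST_TYPE_UC,
 *					    macs, &n_macs);
 */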
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

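/*
 * mlx5_modify_nic_vport_mac_list() - replace the allowed UC or MC
 * address list of the caller's own vport.  Returns -ENOSPC when
 * @list_size exceeds the corresponding log_max_current_{uc,mc}_list
 * capability; each entry is written into a mac_address_layout slot,
 * again with the MAC in the last six of eight bytes.
 */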
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

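/*
 * VLAN list accessors.  The firmware reuses the allowed-address array
 * (current_uc_mac_address[]) to carry vlan_layout entries when
 * allowed_list_type is MLX5_NVPRT_LIST_TYPE_VLAN, which is why the two
 * helpers below index the same field as the MAC list helpers.
 */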
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

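/*
 * Convenience accessors for single fields of the caller's own NIC
 * vport context (system image GUID, node GUID, Q_Key violation
 * counter), plus a group-manager-only setter for another vport's node
 * GUID.
 */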
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
						nic_vport_context.system_image_guid);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
					   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

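/*
 * mlx5_query_hca_vport_gid() - read one GID, or the whole table when
 * @gid_index is 0xffff, from an HCA vport.  Addressing another
 * function's vport (@other_vport set) requires the vport_group_manager
 * capability; @port_num is only written for dual-port devices.  Note
 * that only the first returned entry is copied into @gid.
 */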
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

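/*
 * mlx5_query_hca_vport_pkey() - read one P_Key, or the whole table
 * when @pkey_index is 0xffff, from an HCA vport.  @pkey must have room
 * for as many entries as will be returned; the same group-manager rule
 * as for the GID query applies.
 */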
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

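/*
 * mlx5_query_hca_vport_context() - fill a struct mlx5_hca_vport_context
 * from the QUERY_HCA_VPORT_CONTEXT command, either for the local vport
 * (other_vport == 0) or, when the device is the vport group manager,
 * for the vport identified by @vf_num.
 */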
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;
	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

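/*
 * mlx5_modify_nic_vport_promisc() - program the promiscuous filters of
 * the caller's own NIC vport.
 *
 * Illustrative sketch (mdev is an assumed name; enables everything):
 *
 *	int err = mlx5_modify_nic_vport_promisc(mdev, 1, 1, 1);
 */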
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

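/*
 * RoCE on the NIC vport is a single enable bit; the two exported
 * wrappers below toggle nic_vport_context.roce_en through
 * field_select.roce_en.
 */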
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

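/*
 * mlx5_core_query_vport_counter() - run QUERY_VPORT_COUNTER into a
 * caller-provided query_vport_counter_out buffer.  For another
 * function's counters (@other_vport set) the VF index is translated to
 * its vport number (vf + 1, vport 0 being the PF) and the
 * vport_group_manager capability is required.
 */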
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int	is_group_manager;
	void   *in;
	int	err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto free;
	err = mlx5_cmd_status_to_err_v2(out);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		goto ex;

	err = mlx5_cmd_status_to_err_v2(out);

ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);