1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include "mlx5_core.h"
38 
39 /* Mutex to hold while enabling or disabling RoCE */
40 static DEFINE_MUTEX(mlx5_roce_en_lock);
41 
/* Issue the QUERY_VPORT_STATE command for @vport into the caller-supplied
 * @out buffer of @outlen bytes. @opmod selects which state flavor is
 * queried (passed through to the command's op_mod field).
 * Returns 0 on success or a negative errno from mlx5_cmd_exec().
 */
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* vport 0 is the caller's own vport; only set other_vport for others */
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
56 
/* Return the operational state of @vport.
 * The command status is deliberately ignored: on failure the
 * zero-initialized @out buffer yields state 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
66 
/* Return the administrative state of @vport.
 * As with mlx5_query_vport_state(), a command failure is not reported;
 * the zeroed buffer then yields admin_state 0.
 */
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
76 
/* Set the administrative state of @vport to @state via
 * MODIFY_VPORT_STATE. @opmod selects the state flavor being modified.
 * Returns 0 on success or a negative errno.
 */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	/* other_vport is only needed when not targeting our own vport (0) */
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
94 
/* Query the NIC vport context of @vport into the caller's @out buffer
 * (@outlen bytes). Shared helper for all the query_nic_vport_* wrappers
 * below. Returns 0 on success or a negative errno.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
108 
/* Execute MODIFY_NIC_VPORT_CONTEXT with the caller-prepared @in buffer
 * (@inlen bytes). The opcode is stamped here so callers only fill in the
 * field_select bits and context fields. Returns 0 or a negative errno.
 */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
118 
/* Read @vport's minimum WQE inline mode into *@min_inline.
 * *@min_inline is only written on success. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
132 
/* Resolve the effective minimum inline mode for our own vport (0) from
 * the device's wqe_inline_mode capability.
 * NOTE(review): there is no default case — for an unrecognized capability
 * value (or if the VPORT_CONTEXT query fails) *min_inline_mode is left
 * untouched; callers appear expected to pre-initialize it — confirm.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* mode is per-vport; read it from our NIC vport context */
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
149 
/* Set @vport's minimum WQE inline mode to @min_inline.
 * NOTE(review): unlike the other modify helpers in this file, other_vport
 * is set unconditionally (even for vport 0) — presumably this is only
 * called by the eswitch manager for other vports; confirm with callers.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
169 
/* Read @vport's permanent MAC address into @addr (ETH_ALEN bytes).
 * @addr is only written on success. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* safe to compute before the query: it points into the out buffer */
	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		/* MAC occupies the low 6 bytes of the 8-byte field */
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
193 
/* Set @vport's permanent MAC address to @addr (ETH_ALEN bytes).
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* MAC occupies the low 6 bytes of the 8-byte field */
	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
228 
/* Read our own vport's (vport 0) MTU into *@mtu.
 * *@mtu is only written on success. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
248 
/* Set our own vport's (vport 0) MTU to @mtu.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
268 
/* Read @vport's allowed UC or MC MAC list (selected by @list_type) into
 * @addr_list. On entry *@list_size is the caller's capacity (clamped to
 * the device maximum); on success it is updated to the number of entries
 * actually returned. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): the output buffer for a QUERY command is sized with
	 * the modify_nic_vport_context_in layout; looks like it should be
	 * query_nic_vport_context_out — harmless only if modify-in is at
	 * least as large. Confirm against the mlx5_ifc layouts.
	 */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* the current_uc_mac_address slots hold both UC and MC
		 * entries; the MAC is the low 6 bytes of each 8-byte slot
		 */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
332 
333 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
334 				   enum mlx5_list_type list_type,
335 				   u8 addr_list[][ETH_ALEN],
336 				   int list_size)
337 {
338 	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
339 	void *nic_vport_ctx;
340 	int max_list_size;
341 	int in_sz;
342 	void *in;
343 	int err;
344 	int i;
345 
346 	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
347 		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
348 		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
349 
350 	if (list_size > max_list_size)
351 		return -ENOSPC;
352 
353 	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
354 		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
355 
356 	memset(out, 0, sizeof(out));
357 	in = kzalloc(in_sz, GFP_KERNEL);
358 	if (!in)
359 		return -ENOMEM;
360 
361 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
362 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
363 	MLX5_SET(modify_nic_vport_context_in, in,
364 		 field_select.addresses_list, 1);
365 
366 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
367 				     nic_vport_context);
368 
369 	MLX5_SET(nic_vport_context, nic_vport_ctx,
370 		 allowed_list_type, list_type);
371 	MLX5_SET(nic_vport_context, nic_vport_ctx,
372 		 allowed_list_size, list_size);
373 
374 	for (i = 0; i < list_size; i++) {
375 		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
376 					    nic_vport_ctx,
377 					    current_uc_mac_address[i]) + 2;
378 		ether_addr_copy(curr_mac, addr_list[i]);
379 	}
380 
381 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
382 	kfree(in);
383 	return err;
384 }
385 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
386 
387 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
388 			       u32 vport,
389 			       u16 vlans[],
390 			       int *size)
391 {
392 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
393 	void *nic_vport_ctx;
394 	int req_list_size;
395 	int max_list_size;
396 	int out_sz;
397 	void *out;
398 	int err;
399 	int i;
400 
401 	req_list_size = *size;
402 	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
403 	if (req_list_size > max_list_size) {
404 		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
405 			       req_list_size, max_list_size);
406 		req_list_size = max_list_size;
407 	}
408 
409 	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
410 			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
411 
412 	memset(in, 0, sizeof(in));
413 	out = kzalloc(out_sz, GFP_KERNEL);
414 	if (!out)
415 		return -ENOMEM;
416 
417 	MLX5_SET(query_nic_vport_context_in, in, opcode,
418 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
419 	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
420 		 MLX5_NVPRT_LIST_TYPE_VLAN);
421 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
422 
423 	if (vport)
424 		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
425 
426 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
427 	if (err)
428 		goto out;
429 
430 	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
431 				     nic_vport_context);
432 	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
433 				 allowed_list_size);
434 
435 	*size = req_list_size;
436 	for (i = 0; i < req_list_size; i++) {
437 		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
438 					       nic_vport_ctx,
439 					       current_uc_mac_address[i]);
440 		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
441 	}
442 out:
443 	kfree(out);
444 	return err;
445 }
446 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
447 
448 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
449 				u16 vlans[],
450 				int list_size)
451 {
452 	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
453 	void *nic_vport_ctx;
454 	int max_list_size;
455 	int in_sz;
456 	void *in;
457 	int err;
458 	int i;
459 
460 	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
461 
462 	if (list_size > max_list_size)
463 		return -ENOSPC;
464 
465 	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
466 		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
467 
468 	memset(out, 0, sizeof(out));
469 	in = kzalloc(in_sz, GFP_KERNEL);
470 	if (!in)
471 		return -ENOMEM;
472 
473 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
474 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
475 	MLX5_SET(modify_nic_vport_context_in, in,
476 		 field_select.addresses_list, 1);
477 
478 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
479 				     nic_vport_context);
480 
481 	MLX5_SET(nic_vport_context, nic_vport_ctx,
482 		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
483 	MLX5_SET(nic_vport_context, nic_vport_ctx,
484 		 allowed_list_size, list_size);
485 
486 	for (i = 0; i < list_size; i++) {
487 		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
488 					       nic_vport_ctx,
489 					       current_uc_mac_address[i]);
490 		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
491 	}
492 
493 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
494 	kfree(in);
495 	return err;
496 }
497 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
498 
499 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
500 					   u64 *system_image_guid)
501 {
502 	u32 *out;
503 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
504 
505 	out = kvzalloc(outlen, GFP_KERNEL);
506 	if (!out)
507 		return -ENOMEM;
508 
509 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
510 
511 	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
512 					nic_vport_context.system_image_guid);
513 
514 	kvfree(out);
515 
516 	return 0;
517 }
518 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
519 
520 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
521 {
522 	u32 *out;
523 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
524 
525 	out = kvzalloc(outlen, GFP_KERNEL);
526 	if (!out)
527 		return -ENOMEM;
528 
529 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
530 
531 	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
532 				nic_vport_context.node_guid);
533 
534 	kvfree(out);
535 
536 	return 0;
537 }
538 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
539 
/* Set the node GUID of another vport (@vport must be non-zero) to
 * @node_guid. Only allowed when this function is the vport group manager
 * and the device advertises nic_vport_node_guid_modify.
 * Returns 0 on success, -EINVAL/-EACCES/-EOPNOTSUPP on precondition
 * failure, -ENOMEM on allocation failure, or the command status.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -EOPNOTSUPP;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	/* !!vport is always 1 here (vport == 0 was rejected above) */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
574 
575 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
576 					u16 *qkey_viol_cntr)
577 {
578 	u32 *out;
579 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
580 
581 	out = kvzalloc(outlen, GFP_KERNEL);
582 	if (!out)
583 		return -ENOMEM;
584 
585 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
586 
587 	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
588 				   nic_vport_context.qkey_violation_counter);
589 
590 	kvfree(out);
591 
592 	return 0;
593 }
594 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
595 
/* Query a GID from @vf_num's HCA vport GID table on @port_num.
 * @gid_index == 0xffff requests the whole table, but even then only the
 * first returned entry is copied into *@gid.
 * Querying another vport requires the vport-group-manager capability.
 * Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16  vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	/* NOTE(review): '>' lets gid_index == tbsz through; if indices are
	 * 0-based this looks off-by-one — confirm against the PRM.
	 */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* GID entries follow the fixed-size output struct */
	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* copy the first GID entry, located just past the base struct */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
661 
/* Query P_Key(s) from @vf_num's HCA vport P_Key table on @port_num.
 * @pkey_index == 0xffff requests the whole table; @pkey must then have
 * room for the full table, as all entries are copied out.
 * Querying another vport requires the vport-group-manager capability.
 * Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	/* NOTE(review): '>' lets pkey_index == tbsz through; if indices are
	 * 0-based this looks off-by-one — confirm against the PRM.
	 */
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* pkey entries follow the fixed-size output struct */
	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* copy all returned entries into the caller's array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
726 
727 int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
728 				 u8 other_vport, u8 port_num,
729 				 u16 vf_num,
730 				 struct mlx5_hca_vport_context *rep)
731 {
732 	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
733 	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
734 	int is_group_manager;
735 	void *out;
736 	void *ctx;
737 	int err;
738 
739 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
740 
741 	out = kzalloc(out_sz, GFP_KERNEL);
742 	if (!out)
743 		return -ENOMEM;
744 
745 	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
746 
747 	if (other_vport) {
748 		if (is_group_manager) {
749 			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
750 			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
751 		} else {
752 			err = -EPERM;
753 			goto ex;
754 		}
755 	}
756 
757 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
758 		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
759 
760 	err = mlx5_cmd_exec(dev, in, sizeof(in), out,  out_sz);
761 	if (err)
762 		goto ex;
763 
764 	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
765 	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
766 	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
767 	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
768 	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
769 	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
770 	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
771 				      port_physical_state);
772 	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
773 	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
774 					       port_physical_state);
775 	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
776 	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
777 	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
778 	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
779 					  cap_mask1_field_select);
780 	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
781 	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
782 					  cap_mask2_field_select);
783 	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
784 	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
785 					   init_type_reply);
786 	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
787 	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
788 					  subnet_timeout);
789 	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
790 	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
791 	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
792 						  qkey_violation_counter);
793 	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
794 						  pkey_violation_counter);
795 	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
796 	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
797 					    system_image_guid);
798 
799 ex:
800 	kfree(out);
801 	return err;
802 }
803 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
804 
805 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
806 					   u64 *sys_image_guid)
807 {
808 	struct mlx5_hca_vport_context *rep;
809 	int err;
810 
811 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
812 	if (!rep)
813 		return -ENOMEM;
814 
815 	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
816 	if (!err)
817 		*sys_image_guid = rep->sys_image_guid;
818 
819 	kfree(rep);
820 	return err;
821 }
822 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
823 
824 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
825 				   u64 *node_guid)
826 {
827 	struct mlx5_hca_vport_context *rep;
828 	int err;
829 
830 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
831 	if (!rep)
832 		return -ENOMEM;
833 
834 	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
835 	if (!err)
836 		*node_guid = rep->node_guid;
837 
838 	kfree(rep);
839 	return err;
840 }
841 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
842 
/* Read @vport's promiscuous-mode flags. On success the three out
 * parameters receive the unicast, multicast and all-promisc flags; on
 * failure they are left untouched. Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
873 
/* Set our own vport's (0) promiscuous-mode flags: unicast, multicast and
 * all-promisc. Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
902 
/* Bit positions used by mlx5_nic_vport_query_local_lb() to fold the two
 * disable_*_local_lb context fields into a single status value.
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
907 
/* Enable or disable local loopback on our own vport by toggling the
 * disable_mc/uc_local_lb context fields (the context fields are the
 * inverse of @enable). Each field is only selected for update when the
 * device advertises the matching capability; if neither capability is
 * present this is a successful no-op. Returns 0 or a negative errno.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
945 
/* Query whether local loopback is enabled on our own vport.
 * *@status is true only when neither UC nor MC loopback is disabled;
 * it is only written on success. Returns 0 or a negative errno.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	/* fold both disable bits into one value using the enum positions */
	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	/* loopback is "on" only if no disable bit is set */
	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
974 
/* Values written to the nic_vport_context roce_en field. */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
979 
/* Program the roce_en field of our own vport's NIC vport context to
 * @state. Callers serialize via mlx5_roce_en_lock.
 * Returns 0 or a negative errno.
 */
static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
1001 
1002 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
1003 {
1004 	int err = 0;
1005 
1006 	mutex_lock(&mlx5_roce_en_lock);
1007 	if (!mdev->roce.roce_en)
1008 		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
1009 
1010 	if (!err)
1011 		mdev->roce.roce_en++;
1012 	mutex_unlock(&mlx5_roce_en_lock);
1013 
1014 	return err;
1015 }
1016 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
1017 
1018 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
1019 {
1020 	int err = 0;
1021 
1022 	mutex_lock(&mlx5_roce_en_lock);
1023 	if (mdev->roce.roce_en) {
1024 		mdev->roce.roce_en--;
1025 		if (mdev->roce.roce_en == 0)
1026 			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
1027 
1028 		if (err)
1029 			mdev->roce.roce_en++;
1030 	}
1031 	mutex_unlock(&mlx5_roce_en_lock);
1032 	return err;
1033 }
1034 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
1035 
1036 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
1037 				  int vf, u8 port_num, void *out,
1038 				  size_t out_sz)
1039 {
1040 	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1041 	int	is_group_manager;
1042 	void   *in;
1043 	int	err;
1044 
1045 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1046 	in = kvzalloc(in_sz, GFP_KERNEL);
1047 	if (!in) {
1048 		err = -ENOMEM;
1049 		return err;
1050 	}
1051 
1052 	MLX5_SET(query_vport_counter_in, in, opcode,
1053 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1054 	if (other_vport) {
1055 		if (is_group_manager) {
1056 			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1057 			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
1058 		} else {
1059 			err = -EPERM;
1060 			goto free;
1061 		}
1062 	}
1063 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1064 		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1065 
1066 	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
1067 free:
1068 	kvfree(in);
1069 	return err;
1070 }
1071 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1072 
1073 int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
1074 				u64 *rx_discard_vport_down,
1075 				u64 *tx_discard_vport_down)
1076 {
1077 	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
1078 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
1079 	int err;
1080 
1081 	MLX5_SET(query_vnic_env_in, in, opcode,
1082 		 MLX5_CMD_OP_QUERY_VNIC_ENV);
1083 	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
1084 	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
1085 	if (vport)
1086 		MLX5_SET(query_vnic_env_in, in, other_vport, 1);
1087 
1088 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1089 	if (err)
1090 		return err;
1091 
1092 	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1093 					    vport_env.receive_discard_vport_down);
1094 	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1095 					    vport_env.transmit_discard_vport_down);
1096 	return 0;
1097 }
1098 
1099 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
1100 				       u8 other_vport, u8 port_num,
1101 				       int vf,
1102 				       struct mlx5_hca_vport_context *req)
1103 {
1104 	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
1105 	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
1106 	int is_group_manager;
1107 	void *in;
1108 	int err;
1109 	void *ctx;
1110 
1111 	mlx5_core_dbg(dev, "vf %d\n", vf);
1112 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1113 	in = kzalloc(in_sz, GFP_KERNEL);
1114 	if (!in)
1115 		return -ENOMEM;
1116 
1117 	memset(out, 0, sizeof(out));
1118 	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
1119 	if (other_vport) {
1120 		if (is_group_manager) {
1121 			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
1122 			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
1123 		} else {
1124 			err = -EPERM;
1125 			goto ex;
1126 		}
1127 	}
1128 
1129 	if (MLX5_CAP_GEN(dev, num_ports) > 1)
1130 		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
1131 
1132 	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
1133 	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
1134 	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
1135 	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
1136 	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
1137 	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
1138 	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
1139 	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
1140 	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
1141 	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
1142 	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
1143 	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
1144 	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
1145 	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
1146 	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
1147 	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
1148 	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
1149 	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
1150 	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
1151 	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
1152 	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
1153 	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
1154 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
1155 ex:
1156 	kfree(in);
1157 	return err;
1158 }
1159 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1160 
1161 int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
1162 				       struct mlx5_core_dev *port_mdev)
1163 {
1164 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1165 	void *in;
1166 	int err;
1167 
1168 	in = kvzalloc(inlen, GFP_KERNEL);
1169 	if (!in)
1170 		return -ENOMEM;
1171 
1172 	err = mlx5_nic_vport_enable_roce(port_mdev);
1173 	if (err)
1174 		goto free;
1175 
1176 	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1177 	MLX5_SET(modify_nic_vport_context_in, in,
1178 		 nic_vport_context.affiliated_vhca_id,
1179 		 MLX5_CAP_GEN(master_mdev, vhca_id));
1180 	MLX5_SET(modify_nic_vport_context_in, in,
1181 		 nic_vport_context.affiliation_criteria,
1182 		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
1183 
1184 	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
1185 	if (err)
1186 		mlx5_nic_vport_disable_roce(port_mdev);
1187 
1188 free:
1189 	kvfree(in);
1190 	return err;
1191 }
1192 EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1193 
1194 int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1195 {
1196 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1197 	void *in;
1198 	int err;
1199 
1200 	in = kvzalloc(inlen, GFP_KERNEL);
1201 	if (!in)
1202 		return -ENOMEM;
1203 
1204 	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1205 	MLX5_SET(modify_nic_vport_context_in, in,
1206 		 nic_vport_context.affiliated_vhca_id, 0);
1207 	MLX5_SET(modify_nic_vport_context_in, in,
1208 		 nic_vport_context.affiliation_criteria, 0);
1209 
1210 	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
1211 	if (!err)
1212 		mlx5_nic_vport_disable_roce(port_mdev);
1213 
1214 	kvfree(in);
1215 	return err;
1216 }
1217 EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1218