xref: /openbmc/linux/drivers/infiniband/hw/mlx5/cmd.c (revision 1fa0a7dc)
/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cmd.h"

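/* Query the device's special contexts and return the dump fill mkey in *mkey. */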
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)]   = {0};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}

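/* Query the device's special contexts and return the null mkey in *null_mkey. */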
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)]   = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

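/*
 * Query congestion control parameters; cong_point selects the congestion
 * protocol being queried.
 */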
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

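/* Execute a caller-built MODIFY_CONG_PARAMS command mailbox. */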
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}

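/*
 * Allocate 'length' bytes of device memory (MEMIC). The backing pages are
 * reserved in dm->memic_alloc_pages before ALLOC_MEMIC is issued; on -EAGAIN
 * the search resumes from the next page. On success *addr holds dev->bar_addr
 * plus the device-reported start address of the allocation.
 */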
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

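/* Free a MEMIC allocation and clear its pages in the allocation bitmap. */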
int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

	if (!err) {
		spin_lock(&dm->lock);
		bitmap_clear(dm->memic_alloc_pages,
			     start_page_idx, num_pages);
		spin_unlock(&dm->lock);
	}

	return err;
}

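/*
 * Allocate a range of SW ICM blocks for steering or header-modify ICM:
 * reserve the blocks in the matching driver bitmap, then create a SW_ICM
 * general object covering the chosen ICM address range.
 */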
int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			  u16 uid, phys_addr_t *addr, u32 *obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
	unsigned long *block_map;
	u64 icm_start_addr;
	u32 log_icm_size;
	u32 max_blocks;
	u64 block_idx;
	void *sw_icm;
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						steering_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
					header_modify_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_header_modify_sw_icm_size);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
	spin_lock(&dm->lock);
	block_idx = bitmap_find_next_zero_area(block_map,
					       max_blocks,
					       0,
					       num_blocks, 0);

	if (block_idx < max_blocks)
		bitmap_set(block_map,
			   block_idx, num_blocks);

	spin_unlock(&dm->lock);

	if (block_idx >= max_blocks)
		return -ENOMEM;

	sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
	icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
		   icm_start_addr);
	MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret) {
		spin_lock(&dm->lock);
		bitmap_clear(block_map,
			     block_idx, num_blocks);
		spin_unlock(&dm->lock);

		return ret;
	}

	*addr = icm_start_addr;
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

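/* Destroy a SW ICM object and release its blocks in the allocation bitmap. */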
int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			    u16 uid, phys_addr_t addr, u32 obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	unsigned long *block_map;
	u64 start_idx;
	int err;

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		start_idx =
			(addr - MLX5_CAP64_DEV_MEM(
					dev, steering_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		start_idx =
			(addr -
			 MLX5_CAP64_DEV_MEM(
				 dev, header_modify_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	spin_lock(&dm->lock);
	bitmap_clear(block_map,
		     start_idx, num_blocks);
	spin_unlock(&dm->lock);

	return 0;
}

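/*
 * Read the extended Ethernet counters group of the PPCNT register for
 * local port 1.
 */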
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
				    0, 0);
}

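/* Destroy a TIR created on behalf of the given user context (uid). */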
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

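/* Destroy a TIS created on behalf of the given user context (uid). */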
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

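/* Destroy an RQT created on behalf of the given user context (uid). */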
void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

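/* Allocate a transport domain; the new domain number is returned in *tdn. */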
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

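/* Release a previously allocated transport domain. */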
void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

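/* Release a protection domain. */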
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)]   = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

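/* Attach a QP to the multicast group identified by mgid. */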
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)]   = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

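/* Detach a QP from the multicast group identified by mgid. */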
int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)]   = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

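/* Allocate an XRC domain; the new XRCD number is returned in *xrcdn. */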
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

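/* Release an XRC domain. */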
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

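/* Allocate a queue counter set; its id is returned in *counter_id. */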
int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
			     u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	MLX5_SET(alloc_q_counter_in, in, uid, uid);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}

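/*
 * Execute a MAD through the MAD_IFC command: copy the request MAD into the
 * command mailbox, run the command, and copy the response MAD back to outb.
 */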
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);

	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}