1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
7 				       bool other_vport,
8 				       u16 vport_number,
9 				       u64 *icm_address_rx,
10 				       u64 *icm_address_tx)
11 {
12 	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
13 	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
14 	int err;
15 
16 	MLX5_SET(query_esw_vport_context_in, in, opcode,
17 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
18 	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
19 	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
20 
21 	err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
22 	if (err)
23 		return err;
24 
25 	*icm_address_rx =
26 		MLX5_GET64(query_esw_vport_context_out, out,
27 			   esw_vport_context.sw_steering_vport_icm_address_rx);
28 	*icm_address_tx =
29 		MLX5_GET64(query_esw_vport_context_out, out,
30 			   esw_vport_context.sw_steering_vport_icm_address_tx);
31 	return 0;
32 }
33 
34 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
35 			  u16 vport_number, u16 *gvmi)
36 {
37 	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
38 	int out_size;
39 	void *out;
40 	int err;
41 
42 	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
43 	out = kzalloc(out_size, GFP_KERNEL);
44 	if (!out)
45 		return -ENOMEM;
46 
47 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
48 	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
49 	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
50 	MLX5_SET(query_hca_cap_in, in, op_mod,
51 		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
52 		 HCA_CAP_OPMOD_GET_CUR);
53 
54 	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
55 	if (err) {
56 		kfree(out);
57 		return err;
58 	}
59 
60 	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
61 
62 	kfree(out);
63 	return 0;
64 }
65 
66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 			      struct mlx5dr_esw_caps *caps)
68 {
69 	caps->drop_icm_address_rx =
70 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 					 sw_steering_fdb_action_drop_icm_address_rx);
72 	caps->drop_icm_address_tx =
73 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 					 sw_steering_fdb_action_drop_icm_address_tx);
75 	caps->uplink_icm_address_rx =
76 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 					 sw_steering_uplink_icm_address_rx);
78 	caps->uplink_icm_address_tx =
79 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 					 sw_steering_uplink_icm_address_tx);
81 	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 	if (!caps->sw_owner_v2)
83 		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84 
85 	return 0;
86 }
87 
88 static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
89 					  u16 vport, bool *roce_en)
90 {
91 	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
92 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
93 	int err;
94 
95 	MLX5_SET(query_nic_vport_context_in, in, opcode,
96 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
97 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
98 	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
99 
100 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
101 	if (err)
102 		return err;
103 
104 	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
105 			    nic_vport_context.roce_en);
106 	return 0;
107 }
108 
109 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
110 			    struct mlx5dr_cmd_caps *caps)
111 {
112 	bool roce_en;
113 	int err;
114 
115 	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
116 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
117 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
118 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
119 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
120 
121 	if (MLX5_CAP_GEN(mdev, roce)) {
122 		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
123 		if (err)
124 			return err;
125 
126 		caps->roce_caps.roce_en = roce_en;
127 		caps->roce_caps.fl_rc_qp_when_roce_disabled =
128 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
129 		caps->roce_caps.fl_rc_qp_when_roce_enabled =
130 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
131 	}
132 
133 	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
134 
135 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
136 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
137 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
138 	}
139 
140 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
141 		caps->flex_parser_id_icmpv6_dw0 =
142 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
143 		caps->flex_parser_id_icmpv6_dw1 =
144 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
145 	}
146 
147 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
148 		caps->flex_parser_id_geneve_tlv_option_0 =
149 			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
150 
151 	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
152 		caps->flex_parser_id_mpls_over_gre =
153 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
154 
155 	if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
156 		caps->flex_parser_id_mpls_over_udp =
157 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
158 
159 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
160 		caps->flex_parser_id_gtpu_dw_0 =
161 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
162 
163 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
164 		caps->flex_parser_id_gtpu_teid =
165 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
166 
167 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
168 		caps->flex_parser_id_gtpu_dw_2 =
169 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
170 
171 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
172 		caps->flex_parser_id_gtpu_first_ext_dw_0 =
173 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
174 
175 	caps->nic_rx_drop_address =
176 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
177 	caps->nic_tx_drop_address =
178 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
179 	caps->nic_tx_allow_address =
180 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
181 
182 	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
183 	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
184 
185 	if (!caps->rx_sw_owner_v2)
186 		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
187 	if (!caps->tx_sw_owner_v2)
188 		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
189 
190 	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
191 
192 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
193 	caps->hdr_modify_icm_addr =
194 		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
195 
196 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
197 
198 	return 0;
199 }
200 
201 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
202 				enum fs_flow_table_type type,
203 				u32 table_id,
204 				struct mlx5dr_cmd_query_flow_table_details *output)
205 {
206 	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
207 	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
208 	int err;
209 
210 	MLX5_SET(query_flow_table_in, in, opcode,
211 		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
212 
213 	MLX5_SET(query_flow_table_in, in, table_type, type);
214 	MLX5_SET(query_flow_table_in, in, table_id, table_id);
215 
216 	err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
217 	if (err)
218 		return err;
219 
220 	output->status = MLX5_GET(query_flow_table_out, out, status);
221 	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
222 
223 	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
224 						 flow_table_context.sw_owner_icm_root_1);
225 	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
226 						 flow_table_context.sw_owner_icm_root_0);
227 
228 	return 0;
229 }
230 
231 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
232 {
233 	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
234 
235 	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
236 
237 	return mlx5_cmd_exec_in(mdev, sync_steering, in);
238 }
239 
240 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
241 					u32 table_type,
242 					u32 table_id,
243 					u32 group_id,
244 					u32 modify_header_id,
245 					u32 vport_id)
246 {
247 	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
248 	void *in_flow_context;
249 	unsigned int inlen;
250 	void *in_dests;
251 	u32 *in;
252 	int err;
253 
254 	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
255 		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
256 
257 	in = kvzalloc(inlen, GFP_KERNEL);
258 	if (!in)
259 		return -ENOMEM;
260 
261 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
262 	MLX5_SET(set_fte_in, in, table_type, table_type);
263 	MLX5_SET(set_fte_in, in, table_id, table_id);
264 
265 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
266 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
267 	MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
268 	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
269 	MLX5_SET(flow_context, in_flow_context, action,
270 		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
271 		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
272 
273 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
274 	MLX5_SET(dest_format_struct, in_dests, destination_type,
275 		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
276 	MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
277 
278 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
279 	kvfree(in);
280 
281 	return err;
282 }
283 
284 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
285 				    u32 table_type,
286 				    u32 table_id)
287 {
288 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
289 
290 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
291 	MLX5_SET(delete_fte_in, in, table_type, table_type);
292 	MLX5_SET(delete_fte_in, in, table_id, table_id);
293 
294 	return mlx5_cmd_exec_in(mdev, delete_fte, in);
295 }
296 
297 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
298 				   u32 table_type,
299 				   u8 num_of_actions,
300 				   u64 *actions,
301 				   u32 *modify_header_id)
302 {
303 	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
304 	void *p_actions;
305 	u32 inlen;
306 	u32 *in;
307 	int err;
308 
309 	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
310 		 num_of_actions * sizeof(u64);
311 	in = kvzalloc(inlen, GFP_KERNEL);
312 	if (!in)
313 		return -ENOMEM;
314 
315 	MLX5_SET(alloc_modify_header_context_in, in, opcode,
316 		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
317 	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
318 	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
319 	p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
320 	memcpy(p_actions, actions, num_of_actions * sizeof(u64));
321 
322 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
323 	if (err)
324 		goto out;
325 
326 	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
327 				     modify_header_id);
328 out:
329 	kvfree(in);
330 	return err;
331 }
332 
333 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
334 				     u32 modify_header_id)
335 {
336 	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
337 
338 	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
339 		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
340 	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
341 		 modify_header_id);
342 
343 	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
344 }
345 
346 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
347 				       u32 table_type,
348 				       u32 table_id,
349 				       u32 *group_id)
350 {
351 	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
352 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
353 	u32 *in;
354 	int err;
355 
356 	in = kvzalloc(inlen, GFP_KERNEL);
357 	if (!in)
358 		return -ENOMEM;
359 
360 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
361 	MLX5_SET(create_flow_group_in, in, table_type, table_type);
362 	MLX5_SET(create_flow_group_in, in, table_id, table_id);
363 
364 	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
365 	if (err)
366 		goto out;
367 
368 	*group_id = MLX5_GET(create_flow_group_out, out, group_id);
369 
370 out:
371 	kvfree(in);
372 	return err;
373 }
374 
375 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
376 				  u32 table_type,
377 				  u32 table_id,
378 				  u32 group_id)
379 {
380 	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
381 
382 	MLX5_SET(destroy_flow_group_in, in, opcode,
383 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
384 	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
385 	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
386 	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
387 
388 	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
389 }
390 
391 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
392 				 struct mlx5dr_cmd_create_flow_table_attr *attr,
393 				 u64 *fdb_rx_icm_addr,
394 				 u32 *table_id)
395 {
396 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
397 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
398 	void *ft_mdev;
399 	int err;
400 
401 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
402 	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
403 
404 	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
405 	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
406 	MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
407 	MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
408 
409 	if (attr->sw_owner) {
410 		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
411 		 * icm_addr_1 used for FDB TX
412 		 */
413 		if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
414 			MLX5_SET64(flow_table_context, ft_mdev,
415 				   sw_owner_icm_root_0, attr->icm_addr_rx);
416 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
417 			MLX5_SET64(flow_table_context, ft_mdev,
418 				   sw_owner_icm_root_0, attr->icm_addr_tx);
419 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
420 			MLX5_SET64(flow_table_context, ft_mdev,
421 				   sw_owner_icm_root_0, attr->icm_addr_rx);
422 			MLX5_SET64(flow_table_context, ft_mdev,
423 				   sw_owner_icm_root_1, attr->icm_addr_tx);
424 		}
425 	}
426 
427 	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
428 		 attr->decap_en);
429 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
430 		 attr->reformat_en);
431 
432 	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
433 	if (err)
434 		return err;
435 
436 	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
437 	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
438 	    fdb_rx_icm_addr)
439 		*fdb_rx_icm_addr =
440 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
441 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
442 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
443 
444 	return 0;
445 }
446 
447 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
448 				  u32 table_id,
449 				  u32 table_type)
450 {
451 	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
452 
453 	MLX5_SET(destroy_flow_table_in, in, opcode,
454 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
455 	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
456 	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
457 
458 	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
459 }
460 
461 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
462 				   enum mlx5_reformat_ctx_type rt,
463 				   u8 reformat_param_0,
464 				   u8 reformat_param_1,
465 				   size_t reformat_size,
466 				   void *reformat_data,
467 				   u32 *reformat_id)
468 {
469 	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
470 	size_t inlen, cmd_data_sz, cmd_total_sz;
471 	void *prctx;
472 	void *pdata;
473 	void *in;
474 	int err;
475 
476 	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
477 	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
478 					packet_reformat_context.reformat_data);
479 	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
480 	in = kvzalloc(inlen, GFP_KERNEL);
481 	if (!in)
482 		return -ENOMEM;
483 
484 	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
485 		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
486 
487 	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
488 	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
489 
490 	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
491 	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
492 	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
493 	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
494 	if (reformat_data && reformat_size)
495 		memcpy(pdata, reformat_data, reformat_size);
496 
497 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
498 	if (err)
499 		return err;
500 
501 	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
502 	kvfree(in);
503 
504 	return err;
505 }
506 
507 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
508 				     u32 reformat_id)
509 {
510 	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
511 
512 	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
513 		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
514 	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
515 		 reformat_id);
516 
517 	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
518 }
519 
520 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
521 			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
522 {
523 	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
524 	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
525 	int err;
526 
527 	MLX5_SET(query_roce_address_in, in, opcode,
528 		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
529 
530 	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
531 	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
532 
533 	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
534 	if (err)
535 		return err;
536 
537 	memcpy(&attr->gid,
538 	       MLX5_ADDR_OF(query_roce_address_out,
539 			    out, roce_address.source_l3_address),
540 	       sizeof(attr->gid));
541 	memcpy(attr->mac,
542 	       MLX5_ADDR_OF(query_roce_address_out, out,
543 			    roce_address.source_mac_47_32),
544 	       sizeof(attr->mac));
545 
546 	if (MLX5_GET(query_roce_address_out, out,
547 		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
548 		attr->roce_ver = MLX5_ROCE_VERSION_2;
549 	else
550 		attr->roce_ver = MLX5_ROCE_VERSION_1;
551 
552 	return 0;
553 }
554 
555 static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
556 					struct mlx5dr_cmd_fte_info *fte,
557 					bool *extended_dest)
558 {
559 	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
560 	int num_fwd_destinations = 0;
561 	int num_encap = 0;
562 	int i;
563 
564 	*extended_dest = false;
565 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
566 		return 0;
567 	for (i = 0; i < fte->dests_size; i++) {
568 		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
569 			continue;
570 		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
571 		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
572 			num_encap++;
573 		num_fwd_destinations++;
574 	}
575 
576 	if (num_fwd_destinations > 1 && num_encap > 0)
577 		*extended_dest = true;
578 
579 	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
580 		mlx5_core_warn(dev, "FW does not support extended destination");
581 		return -EOPNOTSUPP;
582 	}
583 	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
584 		mlx5_core_warn(dev, "FW does not support more than %d encaps",
585 			       1 << fw_log_max_fdb_encap_uplink);
586 		return -EOPNOTSUPP;
587 	}
588 
589 	return 0;
590 }
591 
/* Build and execute a SET_FLOW_TABLE_ENTRY command from the SW steering
 * FTE description: flow context, optional push-vlan headers, match value,
 * forwarding destination list and flow counter list.
 *
 * @dev:         mlx5 core device
 * @opmod:       command op_mod (create vs. modify semantics)
 * @modify_mask: bitmask of FTE fields to modify
 * @ft:          target flow table (type/id, optional other-vport)
 * @group_id:    flow group the entry belongs to
 * @fte:         entry description (actions, destinations, match value)
 *
 * Returns 0 on success or a negative errno (including -EINVAL when the
 * counter list exceeds the FW limit, -ENOMEM on allocation failure).
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* The per-destination entry size depends on whether the extended
	 * destination format is required (see mlx5dr_cmd_set_extended_dest).
	 */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	/* Worst case: every dest_arr element becomes a destination entry */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	/* A non-zero vport means the table belongs to another vport */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		/* With extended destinations the reformat is expressed
		 * per-destination, so strip the global reformat action.
		 */
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two VLAN headers may be pushed (vlan[0] then vlan[1]) */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First pass over dest_arr: forwarding destinations (counters are
	 * skipped here and packed in a second pass below).
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			unsigned int id, type = fte->dest_arr[i].type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* ft_num is sent as a plain FLOW_TABLE dest */
				id = fte->dest_arr[i].ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = fte->dest_arr[i].vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(fte->dest_arr[i].vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat is only valid in
				 * the extended destination format.
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			default:
				id = fte->dest_arr[i].tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second pass: flow counters are appended after the forwarding
	 * destinations, using the same per-entry stride.
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		/* NOTE(review): validated after packing; safe because the
		 * buffer was sized for dests_size entries, but the command
		 * is aborted before execution if FW can't take them all.
		 */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
759