1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
/* Query the SW steering ICM base addresses (RX and TX) of a vport
 * from its e-switch vport context.
 */
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	int err;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);

	err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (err)
		return err;

	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);
	return 0;
}
33 
/* Query the GVMI (vhca_id) of a function, either our own or - when
 * @other_vport is set - the one identified by @vport_number.
 * The output buffer is heap allocated since query_hca_cap_out is large.
 */
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int err;

	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
	MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (err)
		goto out_free;

	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);

out_free:
	kfree(out);
	return err;
}
67 
mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev * mdev,struct mlx5dr_esw_caps * caps)68 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
69 			      struct mlx5dr_esw_caps *caps)
70 {
71 	caps->drop_icm_address_rx =
72 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
73 					 sw_steering_fdb_action_drop_icm_address_rx);
74 	caps->drop_icm_address_tx =
75 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
76 					 sw_steering_fdb_action_drop_icm_address_tx);
77 	caps->uplink_icm_address_rx =
78 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
79 					 sw_steering_uplink_icm_address_rx);
80 	caps->uplink_icm_address_tx =
81 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
82 					 sw_steering_uplink_icm_address_tx);
83 	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
84 	if (!caps->sw_owner_v2)
85 		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
86 
87 	return 0;
88 }
89 
/* Read the roce_en bit from the NIC vport context of @vport.
 * other_vport is set whenever a non-zero vport is queried.
 */
static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
					  u16 vport, bool *roce_en)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	int err;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.roce_en);
	return 0;
}
110 
mlx5dr_cmd_query_device(struct mlx5_core_dev * mdev,struct mlx5dr_cmd_caps * caps)111 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
112 			    struct mlx5dr_cmd_caps *caps)
113 {
114 	bool roce_en;
115 	int err;
116 
117 	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
118 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
119 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
120 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
121 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
122 	caps->roce_caps.fl_rc_qp_when_roce_disabled =
123 		MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
124 
125 	if (MLX5_CAP_GEN(mdev, roce)) {
126 		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
127 		if (err)
128 			return err;
129 
130 		caps->roce_caps.roce_en = roce_en;
131 		caps->roce_caps.fl_rc_qp_when_roce_disabled |=
132 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
133 		caps->roce_caps.fl_rc_qp_when_roce_enabled =
134 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
135 	}
136 
137 	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
138 
139 	caps->support_modify_argument =
140 		MLX5_CAP_GEN_64(mdev, general_obj_types) &
141 		MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
142 
143 	if (caps->support_modify_argument) {
144 		caps->log_header_modify_argument_granularity =
145 			MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
146 		caps->log_header_modify_argument_max_alloc =
147 			MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
148 	}
149 
150 	/* geneve_tlv_option_0_exist is the indication of
151 	 * STE support for lookup type flex_parser_ok
152 	 */
153 	caps->flex_parser_ok_bits_supp =
154 		MLX5_CAP_FLOWTABLE(mdev,
155 				   flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
156 
157 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
158 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
159 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
160 	}
161 
162 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
163 		caps->flex_parser_id_icmpv6_dw0 =
164 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
165 		caps->flex_parser_id_icmpv6_dw1 =
166 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
167 	}
168 
169 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
170 		caps->flex_parser_id_geneve_tlv_option_0 =
171 			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
172 
173 	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
174 		caps->flex_parser_id_mpls_over_gre =
175 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
176 
177 	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
178 		caps->flex_parser_id_mpls_over_udp =
179 			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
180 
181 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
182 		caps->flex_parser_id_gtpu_dw_0 =
183 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
184 
185 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
186 		caps->flex_parser_id_gtpu_teid =
187 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
188 
189 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
190 		caps->flex_parser_id_gtpu_dw_2 =
191 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
192 
193 	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
194 		caps->flex_parser_id_gtpu_first_ext_dw_0 =
195 			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
196 
197 	caps->nic_rx_drop_address =
198 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
199 	caps->nic_tx_drop_address =
200 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
201 	caps->nic_tx_allow_address =
202 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
203 
204 	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
205 	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
206 
207 	if (!caps->rx_sw_owner_v2)
208 		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
209 	if (!caps->tx_sw_owner_v2)
210 		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
211 
212 	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
213 
214 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
215 	caps->hdr_modify_icm_addr =
216 		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
217 
218 	caps->log_modify_pattern_icm_size =
219 		MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
220 
221 	caps->hdr_modify_pattern_icm_addr =
222 		MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
223 
224 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
225 
226 	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
227 
228 	return 0;
229 }
230 
/* Query a flow table and report its status, level and (for SW-owned
 * tables) the ICM root addresses into @output.
 */
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	int err;

	MLX5_SET(query_flow_table_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_TABLE);

	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (err)
		return err;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);

	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);

	return 0;
}
260 
/* Query a flow sampler general object and return its SW steering
 * RX/TX ICM addresses.
 */
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr)
{
	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	void *attr;
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);

	*rx_icm_addr = MLX5_GET64(sampler_obj, attr,
				  sw_steering_icm_address_rx);
	*tx_icm_addr = MLX5_GET64(sampler_obj, attr,
				  sw_steering_icm_address_tx);

	return 0;
}
290 
mlx5dr_cmd_sync_steering(struct mlx5_core_dev * mdev)291 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
292 {
293 	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
294 
295 	/* Skip SYNC in case the device is internal error state.
296 	 * Besides a device error, this also happens when we're
297 	 * in fast teardown
298 	 */
299 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
300 		return 0;
301 
302 	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
303 
304 	return mlx5_cmd_exec_in(mdev, sync_steering, in);
305 }
306 
/* Install a flow table entry that applies a modify-header action and
 * forwards to a single vport destination.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context;
	unsigned int inlen;
	void *in_dests;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
	MLX5_SET(flow_context, in_flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	MLX5_SET(dest_format_struct, in_dests, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, in_dests, destination_id, vport);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return err;
}
350 
/* Delete the flow table entry at index 0 of the given table. */
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, table_type);
	MLX5_SET(delete_fte_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
363 
/* Allocate a FW modify-header context from @num_of_actions 64-bit action
 * words and return its id in @modify_header_id.
 */
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *p_actions;
	u32 inlen;
	u32 *in;
	int err;

	/* Input buffer is sized for the variable-length action list */
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		 num_of_actions * sizeof(u64);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
	p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(p_actions, actions, num_of_actions * sizeof(u64));

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto out;

	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
				     modify_header_id);
out:
	kvfree(in);
	return err;
}
399 
/* Free a FW modify-header context previously allocated with
 * mlx5dr_cmd_alloc_modify_header().
 */
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
412 
/* Create a flow group with no match criteria (matches everything) and
 * return its id in @group_id.
 */
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int err;

	/* create_flow_group_in is too large for the stack */
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);

	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (err)
		goto out;

	*group_id = MLX5_GET(create_flow_group_out, out, group_id);

out:
	kvfree(in);
	return err;
}
441 
/* Destroy a flow group created by mlx5dr_cmd_create_empty_flow_group(). */
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
457 
/* Create a flow table per @attr. For SW-owned tables the ICM roots are
 * programmed into the table context; for FW-owned FDB tables the RX ICM
 * address reported by FW is optionally returned in @fdb_rx_icm_addr.
 */
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	void *ft_mdev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
	MLX5_SET(create_flow_table_in, in, uid, attr->uid);

	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_mdev, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
		 * icm_addr_1 used for FDB TX
		 */
		if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (err)
		return err;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
	/* Assemble the 64-bit ICM address from its three FW output fields */
	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}
514 
/* Destroy a flow table created by mlx5dr_cmd_create_flow_table(). */
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
528 
/* Allocate a packet reformat context of type @rt carrying @reformat_size
 * bytes of @reformat_data, and return its id in @reformat_id.
 */
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, cmd_data_sz, cmd_total_sz;
	void *prctx;
	void *pdata;
	void *in;
	int err;

	/* The command layout reserves cmd_data_sz bytes for reformat data;
	 * grow/shrink the input to fit the actual payload, 4-byte aligned.
	 */
	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
					packet_reformat_context.reformat_data);
	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);

	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
	if (reformat_data && reformat_size)
		memcpy(pdata, reformat_data, reformat_size);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_free_in;

	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);

err_free_in:
	kvfree(in);
	return err;
}
575 
/* Free a packet reformat context; best-effort, no status returned. */
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
588 
/* Program the DW/byte selectors into a match definer object context.
 * Only meaningful for the SELECT format id; other formats have fixed
 * selectors and are left untouched.
 */
static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
				      u8 *dw_selectors,
				      u8 *byte_selectors)
{
	if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
		return;

	MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
	MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);

	MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
}
615 
/* Create a match definer general object with the given format, selectors
 * and match mask; return the object id in @definer_id.
 */
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
			      u16 format_id,
			      u8 *dw_selectors,
			      u8 *byte_selectors,
			      u8 *match_mask,
			      u32 *definer_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	void *ptr;
	int err;

	ptr = MLX5_ADDR_OF(create_match_definer_in, in,
			   general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	/* Selectors are only programmed for the SELECT format */
	dr_cmd_set_definer_format(ptr, format_id,
				  dw_selectors, byte_selectors);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}
652 
653 void
mlx5dr_cmd_destroy_definer(struct mlx5_core_dev * mdev,u32 definer_id)654 mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
655 {
656 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
657 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
658 
659 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
660 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
661 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
662 
663 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
664 }
665 
/* Query the RoCE address table entry at @index on @vhca_port_num and
 * fill @attr with the GID, source MAC and RoCE version.
 */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	int err;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);

	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);

	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (err)
		return err;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out,
			    out, roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	/* Anything other than v2 is reported as v1 */
	if (MLX5_GET(query_roce_address_out, out,
		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
		attr->roce_ver = MLX5_ROCE_VERSION_2;
	else
		attr->roce_ver = MLX5_ROCE_VERSION_1;

	return 0;
}
700 
/* Create a range of 2^@log_obj_range modify-header argument objects
 * accessed through @pd; return the base object id in @obj_id.
 */
int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
					u16 log_obj_range, u32 pd,
					u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *attr;
	int ret;

	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, attr,
		 op_param.create.log_obj_range, log_obj_range);

	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
	MLX5_SET(modify_header_arg, attr, access_pd, pd);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}
728 
/* Destroy a modify-header argument object; best-effort, no status returned. */
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
					  u32 obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
743 
/* Decide whether an FTE needs the extended destination format: required
 * when there are multiple forward destinations and at least one of them
 * carries a vport reformat id. Validates the result against FW limits.
 *
 * Returns 0 on success, -EOPNOTSUPP when FW cannot support the
 * requested combination.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	int num_encap = 0;
	int i;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;
	for (i = 0; i < fte->dests_size; i++) {
		/* Counters and NONE entries are not forward destinations */
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}

	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
782 
/**
 * mlx5dr_cmd_set_fte() - Build and execute a SET_FLOW_TABLE_ENTRY command.
 * @dev:         mlx5 core device to execute the command on
 * @opmod:       op_mod value for the command (create vs. modify semantics
 *               are FW-defined; passed through unchanged)
 * @modify_mask: modify_enable_mask — tells FW which FTE fields to update
 * @ft:          target flow table (type, id, and owning vport if any)
 * @group_id:    flow group this entry belongs to
 * @fte:         entry description: match value, actions, flow context and
 *               destination/counter array
 *
 * Serializes @fte into the FW command layout. Destination entries are laid
 * out after the flow context: first all forward destinations, then all
 * counters, each occupying one slot of @dst_cnt_size bytes. The entry size
 * depends on whether the "extended destination" format is needed (i.e. a
 * per-destination packet-reformat is attached to a multi-destination rule).
 *
 * Return: 0 on success, negative errno on failure (-ENOMEM, -EINVAL when
 * the counter list exceeds the FW limit, or the mlx5_cmd_exec() error).
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	/* Decide whether the extended destination format is required; fails
	 * with -EOPNOTSUPP when the rule needs it but FW can't support it.
	 */
	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination slot size differs between the two formats; the
	 * same stride is also used for the trailing counter entries.
	 */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	/* Room for the fixed command header plus one slot per element of
	 * dest_arr (forward destinations and counters alike).
	 */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	/* Non-zero vport means the table belongs to another vport; vport 0
	 * (the local one) intentionally leaves other_vport cleared.
	 */
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* In extended-dest mode the reformat is expressed per
		 * destination entry (below), so the global PACKET_REFORMAT
		 * action bit must not be set as well.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* First (and optional second) VLAN header to push; unused slots are
	 * all-zero since fte->action.vlan[] is copied unconditionally.
	 */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* Pass 1: forward destinations. Counters and NONE entries are
	 * skipped here and handled (counters) in pass 2 below, so the two
	 * kinds end up grouped in the command buffer.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* Table referenced by number rather than by
				 * object id; FW type is FLOW_TABLE either way.
				 */
				id = fte->dest_arr[i].ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;

				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
					id = fte->dest_arr[i].vport.num;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_VHCA_ID));
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				} else {
					/* Uplink: destination id is unused and
					 * the owner vhca id is always valid.
					 */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
				}
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat only exists in the
				 * extended format (extra field past the basic
				 * dest_format_struct).
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				/* Remaining types carry a TIR number; any
				 * unexpected type is encoded as TIR as well.
				 */
				id = fte->dest_arr[i].tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Pass 2: counters, appended right after the forward destinations
	 * using the same slot stride. Their count is reported separately in
	 * flow_counter_list_size.
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		/* Reject rules with more counters than the FW cap allows;
		 * checked after the loop, but all writes stayed within the
		 * dests_size-based allocation above.
		 */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
971