1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
7 				       bool other_vport,
8 				       u16 vport_number,
9 				       u64 *icm_address_rx,
10 				       u64 *icm_address_tx)
11 {
12 	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
13 	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
14 	int err;
15 
16 	MLX5_SET(query_esw_vport_context_in, in, opcode,
17 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
18 	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
19 	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
20 
21 	err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
22 	if (err)
23 		return err;
24 
25 	*icm_address_rx =
26 		MLX5_GET64(query_esw_vport_context_out, out,
27 			   esw_vport_context.sw_steering_vport_icm_address_rx);
28 	*icm_address_tx =
29 		MLX5_GET64(query_esw_vport_context_out, out,
30 			   esw_vport_context.sw_steering_vport_icm_address_tx);
31 	return 0;
32 }
33 
34 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
35 			  u16 vport_number, u16 *gvmi)
36 {
37 	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
38 	int out_size;
39 	void *out;
40 	int err;
41 
42 	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
43 	out = kzalloc(out_size, GFP_KERNEL);
44 	if (!out)
45 		return -ENOMEM;
46 
47 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
48 	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
49 	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
50 	MLX5_SET(query_hca_cap_in, in, op_mod,
51 		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
52 		 HCA_CAP_OPMOD_GET_CUR);
53 
54 	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
55 	if (err) {
56 		kfree(out);
57 		return err;
58 	}
59 
60 	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
61 
62 	kfree(out);
63 	return 0;
64 }
65 
66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 			      struct mlx5dr_esw_caps *caps)
68 {
69 	caps->drop_icm_address_rx =
70 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 					 sw_steering_fdb_action_drop_icm_address_rx);
72 	caps->drop_icm_address_tx =
73 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 					 sw_steering_fdb_action_drop_icm_address_tx);
75 	caps->uplink_icm_address_rx =
76 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 					 sw_steering_uplink_icm_address_rx);
78 	caps->uplink_icm_address_tx =
79 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 					 sw_steering_uplink_icm_address_tx);
81 	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 	if (!caps->sw_owner_v2)
83 		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84 
85 	return 0;
86 }
87 
/* Snapshot the device capabilities that SW steering cares about into @caps. */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps)
{
	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);

	/* Flex parser ids are only read when the matching protocol bit is set */
	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
		caps->flex_parser_id_icmpv6_dw0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
		caps->flex_parser_id_icmpv6_dw1 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
	}

	/* NIC RX/TX default-action ICM addresses */
	caps->nic_rx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
	caps->nic_tx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
	caps->nic_tx_allow_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);

	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);

	/* Legacy sw_owner is only consulted when sw_owner_v2 is absent */
	if (!caps->rx_sw_owner_v2)
		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
	if (!caps->tx_sw_owner_v2)
		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);

	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);

	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	return 0;
}
134 
135 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
136 				enum fs_flow_table_type type,
137 				u32 table_id,
138 				struct mlx5dr_cmd_query_flow_table_details *output)
139 {
140 	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
141 	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
142 	int err;
143 
144 	MLX5_SET(query_flow_table_in, in, opcode,
145 		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
146 
147 	MLX5_SET(query_flow_table_in, in, table_type, type);
148 	MLX5_SET(query_flow_table_in, in, table_id, table_id);
149 
150 	err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
151 	if (err)
152 		return err;
153 
154 	output->status = MLX5_GET(query_flow_table_out, out, status);
155 	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
156 
157 	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
158 						 flow_table_context.sw_owner_icm_root_1);
159 	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
160 						 flow_table_context.sw_owner_icm_root_0);
161 
162 	return 0;
163 }
164 
165 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
166 {
167 	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
168 
169 	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
170 
171 	return mlx5_cmd_exec_in(mdev, sync_steering, in);
172 }
173 
174 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
175 					u32 table_type,
176 					u32 table_id,
177 					u32 group_id,
178 					u32 modify_header_id,
179 					u32 vport_id)
180 {
181 	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
182 	void *in_flow_context;
183 	unsigned int inlen;
184 	void *in_dests;
185 	u32 *in;
186 	int err;
187 
188 	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
189 		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
190 
191 	in = kvzalloc(inlen, GFP_KERNEL);
192 	if (!in)
193 		return -ENOMEM;
194 
195 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
196 	MLX5_SET(set_fte_in, in, table_type, table_type);
197 	MLX5_SET(set_fte_in, in, table_id, table_id);
198 
199 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
200 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
201 	MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
202 	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
203 	MLX5_SET(flow_context, in_flow_context, action,
204 		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
205 		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
206 
207 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
208 	MLX5_SET(dest_format_struct, in_dests, destination_type,
209 		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
210 	MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
211 
212 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
213 	kvfree(in);
214 
215 	return err;
216 }
217 
218 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
219 				    u32 table_type,
220 				    u32 table_id)
221 {
222 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
223 
224 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
225 	MLX5_SET(delete_fte_in, in, table_type, table_type);
226 	MLX5_SET(delete_fte_in, in, table_id, table_id);
227 
228 	return mlx5_cmd_exec_in(mdev, delete_fte, in);
229 }
230 
231 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
232 				   u32 table_type,
233 				   u8 num_of_actions,
234 				   u64 *actions,
235 				   u32 *modify_header_id)
236 {
237 	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
238 	void *p_actions;
239 	u32 inlen;
240 	u32 *in;
241 	int err;
242 
243 	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
244 		 num_of_actions * sizeof(u64);
245 	in = kvzalloc(inlen, GFP_KERNEL);
246 	if (!in)
247 		return -ENOMEM;
248 
249 	MLX5_SET(alloc_modify_header_context_in, in, opcode,
250 		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
251 	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
252 	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
253 	p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
254 	memcpy(p_actions, actions, num_of_actions * sizeof(u64));
255 
256 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
257 	if (err)
258 		goto out;
259 
260 	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
261 				     modify_header_id);
262 out:
263 	kvfree(in);
264 	return err;
265 }
266 
267 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
268 				     u32 modify_header_id)
269 {
270 	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
271 
272 	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
273 		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
274 	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
275 		 modify_header_id);
276 
277 	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
278 }
279 
280 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
281 				       u32 table_type,
282 				       u32 table_id,
283 				       u32 *group_id)
284 {
285 	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
286 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
287 	u32 *in;
288 	int err;
289 
290 	in = kzalloc(inlen, GFP_KERNEL);
291 	if (!in)
292 		return -ENOMEM;
293 
294 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
295 	MLX5_SET(create_flow_group_in, in, table_type, table_type);
296 	MLX5_SET(create_flow_group_in, in, table_id, table_id);
297 
298 	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
299 	if (err)
300 		goto out;
301 
302 	*group_id = MLX5_GET(create_flow_group_out, out, group_id);
303 
304 out:
305 	kfree(in);
306 	return err;
307 }
308 
309 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
310 				  u32 table_type,
311 				  u32 table_id,
312 				  u32 group_id)
313 {
314 	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
315 
316 	MLX5_SET(destroy_flow_group_in, in, opcode,
317 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
318 	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
319 	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
320 	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
321 
322 	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
323 }
324 
325 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
326 				 struct mlx5dr_cmd_create_flow_table_attr *attr,
327 				 u64 *fdb_rx_icm_addr,
328 				 u32 *table_id)
329 {
330 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
331 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
332 	void *ft_mdev;
333 	int err;
334 
335 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
336 	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
337 
338 	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
339 	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
340 	MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
341 	MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
342 
343 	if (attr->sw_owner) {
344 		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
345 		 * icm_addr_1 used for FDB TX
346 		 */
347 		if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
348 			MLX5_SET64(flow_table_context, ft_mdev,
349 				   sw_owner_icm_root_0, attr->icm_addr_rx);
350 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
351 			MLX5_SET64(flow_table_context, ft_mdev,
352 				   sw_owner_icm_root_0, attr->icm_addr_tx);
353 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
354 			MLX5_SET64(flow_table_context, ft_mdev,
355 				   sw_owner_icm_root_0, attr->icm_addr_rx);
356 			MLX5_SET64(flow_table_context, ft_mdev,
357 				   sw_owner_icm_root_1, attr->icm_addr_tx);
358 		}
359 	}
360 
361 	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
362 		 attr->decap_en);
363 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
364 		 attr->reformat_en);
365 
366 	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
367 	if (err)
368 		return err;
369 
370 	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
371 	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
372 	    fdb_rx_icm_addr)
373 		*fdb_rx_icm_addr =
374 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
375 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
376 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
377 
378 	return 0;
379 }
380 
381 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
382 				  u32 table_id,
383 				  u32 table_type)
384 {
385 	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
386 
387 	MLX5_SET(destroy_flow_table_in, in, opcode,
388 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
389 	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
390 	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
391 
392 	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
393 }
394 
395 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
396 				   enum mlx5_reformat_ctx_type rt,
397 				   size_t reformat_size,
398 				   void *reformat_data,
399 				   u32 *reformat_id)
400 {
401 	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
402 	size_t inlen, cmd_data_sz, cmd_total_sz;
403 	void *prctx;
404 	void *pdata;
405 	void *in;
406 	int err;
407 
408 	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
409 	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
410 					packet_reformat_context.reformat_data);
411 	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
412 	in = kvzalloc(inlen, GFP_KERNEL);
413 	if (!in)
414 		return -ENOMEM;
415 
416 	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
417 		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
418 
419 	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
420 	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
421 
422 	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
423 	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
424 	memcpy(pdata, reformat_data, reformat_size);
425 
426 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
427 	if (err)
428 		return err;
429 
430 	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
431 	kvfree(in);
432 
433 	return err;
434 }
435 
436 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
437 				     u32 reformat_id)
438 {
439 	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
440 
441 	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
442 		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
443 	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
444 		 reformat_id);
445 
446 	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
447 }
448 
449 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
450 			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
451 {
452 	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
453 	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
454 	int err;
455 
456 	MLX5_SET(query_roce_address_in, in, opcode,
457 		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
458 
459 	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
460 	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
461 
462 	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
463 	if (err)
464 		return err;
465 
466 	memcpy(&attr->gid,
467 	       MLX5_ADDR_OF(query_roce_address_out,
468 			    out, roce_address.source_l3_address),
469 	       sizeof(attr->gid));
470 	memcpy(attr->mac,
471 	       MLX5_ADDR_OF(query_roce_address_out, out,
472 			    roce_address.source_mac_47_32),
473 	       sizeof(attr->mac));
474 
475 	if (MLX5_GET(query_roce_address_out, out,
476 		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
477 		attr->roce_ver = MLX5_ROCE_VERSION_2;
478 	else
479 		attr->roce_ver = MLX5_ROCE_VERSION_1;
480 
481 	return 0;
482 }
483 
484 static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
485 					struct mlx5dr_cmd_fte_info *fte,
486 					bool *extended_dest)
487 {
488 	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
489 	int num_fwd_destinations = 0;
490 	int num_encap = 0;
491 	int i;
492 
493 	*extended_dest = false;
494 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
495 		return 0;
496 	for (i = 0; i < fte->dests_size; i++) {
497 		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
498 			continue;
499 		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
500 		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
501 			num_encap++;
502 		num_fwd_destinations++;
503 	}
504 
505 	if (num_fwd_destinations > 1 && num_encap > 0)
506 		*extended_dest = true;
507 
508 	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
509 		mlx5_core_warn(dev, "FW does not support extended destination");
510 		return -EOPNOTSUPP;
511 	}
512 	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
513 		mlx5_core_warn(dev, "FW does not support more than %d encaps",
514 			       1 << fw_log_max_fdb_encap_uplink);
515 		return -EOPNOTSUPP;
516 	}
517 
518 	return 0;
519 }
520 
/* Build and execute a SET_FLOW_TABLE_ENTRY command from @fte.
 *
 * Translates the mlx5dr_cmd_fte_info description (action bits, flow
 * context, match value, destination and counter lists) into the FW
 * command layout. When more than one forward destination carries a
 * packet reformat, the extended destination format is used and the
 * destination entries grow accordingly.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination entry size depends on the destination format */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations the reformat id is carried per
		 * destination, so strip it from the FTE-level action bits.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two VLAN headers may be pushed */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* First pass: forward destinations; counter entries are skipped
	 * here and appended in the second pass below.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			unsigned int id, type = fte->dest_arr[i].type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* FW takes a FLOW_TABLE destination with the
				 * table number as the id.
				 */
				id = fte->dest_arr[i].ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = fte->dest_arr[i].vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(fte->dest_arr[i].vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			default:
				id = fte->dest_arr[i].tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Second pass: flow counters, written after the last forward
	 * destination (in_dests keeps advancing from the first pass).
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
688