1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
7 				       bool other_vport,
8 				       u16 vport_number,
9 				       u64 *icm_address_rx,
10 				       u64 *icm_address_tx)
11 {
12 	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
13 	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
14 	int err;
15 
16 	MLX5_SET(query_esw_vport_context_in, in, opcode,
17 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
18 	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
19 	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
20 
21 	err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
22 	if (err)
23 		return err;
24 
25 	*icm_address_rx =
26 		MLX5_GET64(query_esw_vport_context_out, out,
27 			   esw_vport_context.sw_steering_vport_icm_address_rx);
28 	*icm_address_tx =
29 		MLX5_GET64(query_esw_vport_context_out, out,
30 			   esw_vport_context.sw_steering_vport_icm_address_tx);
31 	return 0;
32 }
33 
34 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
35 			  u16 vport_number, u16 *gvmi)
36 {
37 	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
38 	int out_size;
39 	void *out;
40 	int err;
41 
42 	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
43 	out = kzalloc(out_size, GFP_KERNEL);
44 	if (!out)
45 		return -ENOMEM;
46 
47 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
48 	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
49 	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
50 	MLX5_SET(query_hca_cap_in, in, op_mod,
51 		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
52 		 HCA_CAP_OPMOD_GET_CUR);
53 
54 	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
55 	if (err) {
56 		kfree(out);
57 		return err;
58 	}
59 
60 	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
61 
62 	kfree(out);
63 	return 0;
64 }
65 
66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 			      struct mlx5dr_esw_caps *caps)
68 {
69 	caps->drop_icm_address_rx =
70 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 					 sw_steering_fdb_action_drop_icm_address_rx);
72 	caps->drop_icm_address_tx =
73 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 					 sw_steering_fdb_action_drop_icm_address_tx);
75 	caps->uplink_icm_address_rx =
76 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 					 sw_steering_uplink_icm_address_rx);
78 	caps->uplink_icm_address_tx =
79 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 					 sw_steering_uplink_icm_address_tx);
81 	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 	if (!caps->sw_owner_v2)
83 		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
84 
85 	return 0;
86 }
87 
88 static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
89 					  u16 vport, bool *roce_en)
90 {
91 	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
92 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
93 	int err;
94 
95 	MLX5_SET(query_nic_vport_context_in, in, opcode,
96 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
97 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
98 	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
99 
100 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
101 	if (err)
102 		return err;
103 
104 	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
105 			    nic_vport_context.roce_en);
106 	return 0;
107 }
108 
/* Populate @caps with all device capabilities SW steering cares about:
 * general HCA caps, RoCE state, flex-parser IDs, drop/allow ICM anchor
 * addresses, STE ownership version and ICM memory geometry.
 * Returns 0 on success or the error from the NIC vport context query.
 */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps)
{
	bool roce_en;
	int err;

	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);

	/* RoCE sub-caps are only meaningful when the device supports RoCE */
	if (MLX5_CAP_GEN(mdev, roce)) {
		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
		if (err)
			return err;

		caps->roce_caps.roce_en = roce_en;
		caps->roce_caps.fl_rc_qp_when_roce_disabled =
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
		caps->roce_caps.fl_rc_qp_when_roce_enabled =
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
	}

	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);

	/* geneve_tlv_option_0_exist is the indication of
	 * STE support for lookup type flex_parser_ok
	 */
	caps->flex_parser_ok_bits_supp =
		MLX5_CAP_FLOWTABLE(mdev,
				   flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);

	/* Each flex-parser ID is only valid when the matching protocol
	 * bit is set in flex_protocols, hence the per-protocol guards.
	 */
	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
		caps->flex_parser_id_icmpv6_dw0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
		caps->flex_parser_id_icmpv6_dw1 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
		caps->flex_parser_id_geneve_tlv_option_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
		caps->flex_parser_id_mpls_over_gre =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
		caps->flex_parser_id_mpls_over_udp =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
		caps->flex_parser_id_gtpu_teid =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
		caps->flex_parser_id_gtpu_dw_2 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_first_ext_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);

	/* ICM anchor addresses for the built-in NIC drop/allow actions */
	caps->nic_rx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
	caps->nic_tx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
	caps->nic_tx_allow_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);

	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);

	/* Legacy sw_owner bit is consulted only when v2 is unsupported */
	if (!caps->rx_sw_owner_v2)
		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
	if (!caps->tx_sw_owner_v2)
		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);

	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);

	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);

	return 0;
}
209 
210 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
211 				enum fs_flow_table_type type,
212 				u32 table_id,
213 				struct mlx5dr_cmd_query_flow_table_details *output)
214 {
215 	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
216 	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
217 	int err;
218 
219 	MLX5_SET(query_flow_table_in, in, opcode,
220 		 MLX5_CMD_OP_QUERY_FLOW_TABLE);
221 
222 	MLX5_SET(query_flow_table_in, in, table_type, type);
223 	MLX5_SET(query_flow_table_in, in, table_id, table_id);
224 
225 	err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
226 	if (err)
227 		return err;
228 
229 	output->status = MLX5_GET(query_flow_table_out, out, status);
230 	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
231 
232 	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
233 						 flow_table_context.sw_owner_icm_root_1);
234 	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
235 						 flow_table_context.sw_owner_icm_root_0);
236 
237 	return 0;
238 }
239 
240 int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
241 				  u32 sampler_id,
242 				  u64 *rx_icm_addr,
243 				  u64 *tx_icm_addr)
244 {
245 	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
246 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
247 	void *attr;
248 	int ret;
249 
250 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
251 		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
252 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
253 		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
254 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
255 
256 	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
257 	if (ret)
258 		return ret;
259 
260 	attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
261 
262 	*rx_icm_addr = MLX5_GET64(sampler_obj, attr,
263 				  sw_steering_icm_address_rx);
264 	*tx_icm_addr = MLX5_GET64(sampler_obj, attr,
265 				  sw_steering_icm_address_tx);
266 
267 	return 0;
268 }
269 
/* Issue SYNC_STEERING so the HW observes all previously written STEs. */
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};

	/* Skip the SYNC command when the device is in internal error
	 * state. Besides a device error, this also happens when we're
	 * in fast teardown.
	 */
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);

	return mlx5_cmd_exec_in(mdev, sync_steering, in);
}
285 
/* Install a flow table entry that applies a modify-header action and
 * then forwards to a single vport destination.
 * The flow_index defaults to 0 (in buffer is zero-initialized).
 * Returns 0 on success, negative errno on allocation/FW failure.
 */
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context;
	unsigned int inlen;
	void *in_dests;
	u32 *in;
	int err;

	/* The destination list follows the fixed set_fte layout */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
	/* Both forward and modify-header actions are applied */
	MLX5_SET(flow_context, in_flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	MLX5_SET(dest_format_struct, in_dests, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, in_dests, destination_id, vport);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return err;
}
329 
330 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
331 				    u32 table_type,
332 				    u32 table_id)
333 {
334 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
335 
336 	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
337 	MLX5_SET(delete_fte_in, in, table_type, table_type);
338 	MLX5_SET(delete_fte_in, in, table_id, table_id);
339 
340 	return mlx5_cmd_exec_in(mdev, delete_fte, in);
341 }
342 
343 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
344 				   u32 table_type,
345 				   u8 num_of_actions,
346 				   u64 *actions,
347 				   u32 *modify_header_id)
348 {
349 	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
350 	void *p_actions;
351 	u32 inlen;
352 	u32 *in;
353 	int err;
354 
355 	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
356 		 num_of_actions * sizeof(u64);
357 	in = kvzalloc(inlen, GFP_KERNEL);
358 	if (!in)
359 		return -ENOMEM;
360 
361 	MLX5_SET(alloc_modify_header_context_in, in, opcode,
362 		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
363 	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
364 	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
365 	p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
366 	memcpy(p_actions, actions, num_of_actions * sizeof(u64));
367 
368 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
369 	if (err)
370 		goto out;
371 
372 	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
373 				     modify_header_id);
374 out:
375 	kvfree(in);
376 	return err;
377 }
378 
379 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
380 				     u32 modify_header_id)
381 {
382 	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
383 
384 	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
385 		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
386 	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
387 		 modify_header_id);
388 
389 	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
390 }
391 
392 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
393 				       u32 table_type,
394 				       u32 table_id,
395 				       u32 *group_id)
396 {
397 	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
398 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
399 	u32 *in;
400 	int err;
401 
402 	in = kvzalloc(inlen, GFP_KERNEL);
403 	if (!in)
404 		return -ENOMEM;
405 
406 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
407 	MLX5_SET(create_flow_group_in, in, table_type, table_type);
408 	MLX5_SET(create_flow_group_in, in, table_id, table_id);
409 
410 	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
411 	if (err)
412 		goto out;
413 
414 	*group_id = MLX5_GET(create_flow_group_out, out, group_id);
415 
416 out:
417 	kvfree(in);
418 	return err;
419 }
420 
421 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
422 				  u32 table_type,
423 				  u32 table_id,
424 				  u32 group_id)
425 {
426 	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
427 
428 	MLX5_SET(destroy_flow_group_in, in, opcode,
429 		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
430 	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
431 	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
432 	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
433 
434 	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
435 }
436 
437 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
438 				 struct mlx5dr_cmd_create_flow_table_attr *attr,
439 				 u64 *fdb_rx_icm_addr,
440 				 u32 *table_id)
441 {
442 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
443 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
444 	void *ft_mdev;
445 	int err;
446 
447 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
448 	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
449 	MLX5_SET(create_flow_table_in, in, uid, attr->uid);
450 
451 	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
452 	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
453 	MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
454 	MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
455 
456 	if (attr->sw_owner) {
457 		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
458 		 * icm_addr_1 used for FDB TX
459 		 */
460 		if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
461 			MLX5_SET64(flow_table_context, ft_mdev,
462 				   sw_owner_icm_root_0, attr->icm_addr_rx);
463 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
464 			MLX5_SET64(flow_table_context, ft_mdev,
465 				   sw_owner_icm_root_0, attr->icm_addr_tx);
466 		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
467 			MLX5_SET64(flow_table_context, ft_mdev,
468 				   sw_owner_icm_root_0, attr->icm_addr_rx);
469 			MLX5_SET64(flow_table_context, ft_mdev,
470 				   sw_owner_icm_root_1, attr->icm_addr_tx);
471 		}
472 	}
473 
474 	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
475 		 attr->decap_en);
476 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
477 		 attr->reformat_en);
478 
479 	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
480 	if (err)
481 		return err;
482 
483 	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
484 	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
485 	    fdb_rx_icm_addr)
486 		*fdb_rx_icm_addr =
487 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
488 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
489 		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
490 
491 	return 0;
492 }
493 
494 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
495 				  u32 table_id,
496 				  u32 table_type)
497 {
498 	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
499 
500 	MLX5_SET(destroy_flow_table_in, in, opcode,
501 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
502 	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
503 	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
504 
505 	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
506 }
507 
508 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
509 				   enum mlx5_reformat_ctx_type rt,
510 				   u8 reformat_param_0,
511 				   u8 reformat_param_1,
512 				   size_t reformat_size,
513 				   void *reformat_data,
514 				   u32 *reformat_id)
515 {
516 	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
517 	size_t inlen, cmd_data_sz, cmd_total_sz;
518 	void *prctx;
519 	void *pdata;
520 	void *in;
521 	int err;
522 
523 	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
524 	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
525 					packet_reformat_context.reformat_data);
526 	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
527 	in = kvzalloc(inlen, GFP_KERNEL);
528 	if (!in)
529 		return -ENOMEM;
530 
531 	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
532 		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
533 
534 	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
535 	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
536 
537 	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
538 	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
539 	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
540 	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
541 	if (reformat_data && reformat_size)
542 		memcpy(pdata, reformat_data, reformat_size);
543 
544 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
545 	if (err)
546 		return err;
547 
548 	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
549 	kvfree(in);
550 
551 	return err;
552 }
553 
554 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
555 				     u32 reformat_id)
556 {
557 	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
558 
559 	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
560 		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
561 	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
562 		 reformat_id);
563 
564 	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
565 }
566 
/* Program a match definer's dword/byte selector fields.
 * Only the SELECT format uses caller-provided selectors; for any other
 * format_id this is a no-op. Expects dw_selectors[0..8] and
 * byte_selectors[0..7] to be valid.
 */
static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
				      u8 *dw_selectors,
				      u8 *byte_selectors)
{
	if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
		return;

	/* Nine dword selectors - field names are distinct macro tokens,
	 * so each is set individually.
	 */
	MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
	MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);

	/* Eight byte selectors */
	MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
}
593 
594 int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
595 			      u16 format_id,
596 			      u8 *dw_selectors,
597 			      u8 *byte_selectors,
598 			      u8 *match_mask,
599 			      u32 *definer_id)
600 {
601 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
602 	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
603 	void *ptr;
604 	int err;
605 
606 	ptr = MLX5_ADDR_OF(create_match_definer_in, in,
607 			   general_obj_in_cmd_hdr);
608 	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
609 		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
610 	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
611 		 MLX5_OBJ_TYPE_MATCH_DEFINER);
612 
613 	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
614 	MLX5_SET(match_definer, ptr, format_id, format_id);
615 
616 	dr_cmd_set_definer_format(ptr, format_id,
617 				  dw_selectors, byte_selectors);
618 
619 	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
620 	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
621 
622 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
623 	if (err)
624 		return err;
625 
626 	*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
627 
628 	return 0;
629 }
630 
631 void
632 mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
633 {
634 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
635 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
636 
637 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
638 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
639 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
640 
641 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
642 }
643 
644 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
645 			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
646 {
647 	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
648 	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
649 	int err;
650 
651 	MLX5_SET(query_roce_address_in, in, opcode,
652 		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
653 
654 	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
655 	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
656 
657 	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
658 	if (err)
659 		return err;
660 
661 	memcpy(&attr->gid,
662 	       MLX5_ADDR_OF(query_roce_address_out,
663 			    out, roce_address.source_l3_address),
664 	       sizeof(attr->gid));
665 	memcpy(attr->mac,
666 	       MLX5_ADDR_OF(query_roce_address_out, out,
667 			    roce_address.source_mac_47_32),
668 	       sizeof(attr->mac));
669 
670 	if (MLX5_GET(query_roce_address_out, out,
671 		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
672 		attr->roce_ver = MLX5_ROCE_VERSION_2;
673 	else
674 		attr->roce_ver = MLX5_ROCE_VERSION_1;
675 
676 	return 0;
677 }
678 
/* Decide whether the FTE must use the extended destination format and
 * validate the encap count against FW limits.
 * Sets *extended_dest to true when the FTE forwards to more than one
 * destination and at least one destination carries a reformat id.
 * Returns 0 on success, -EOPNOTSUPP when FW cannot support the setup.
 */
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	int num_encap = 0;
	int i;

	*extended_dest = false;
	/* Only relevant when the FTE actually forwards */
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;
	for (i = 0; i < fte->dests_size; i++) {
		/* Counters and NONE entries are not forwarding destinations */
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}

	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	/* FW advertises a log2 bound on the number of encap destinations */
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
717 
/* Build and execute a SET_FLOW_TABLE_ENTRY command from @fte.
 * Handles the extended destination format (needed when multiple
 * forwarding destinations mix with per-destination reformat ids),
 * push-VLAN headers, the match value, the forwarding destination list
 * and the flow counter list.
 * Returns 0 on success, negative errno otherwise.
 */
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination entry size depends on the destination format */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations the reformat is carried per
		 * destination, so strip it from the context-level action.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	/* Up to two push-VLAN headers */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	/* Forwarding destinations: map each dest_arr entry to an IFC
	 * destination type/id pair. Counters are handled in the counter
	 * list below, not here.
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = fte->dest_arr[i].ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;

				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				/* Uplink is encoded as id 0 with the
				 * eswitch-owner-vhca-id-valid bit forced on.
				 */
				if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
					id = fte->dest_arr[i].vport.num;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_VHCA_ID));
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				} else {
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
				}
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				/* Per-destination reformat only exists in the
				 * extended destination format.
				 */
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				id = fte->dest_arr[i].tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Flow counter list follows the forwarding destinations in the
	 * same buffer (in_dests keeps advancing).
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
906