1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
/* In-memory layout of a full hardware STE: control words, the match
 * tag and its bit mask. Sizes come from dr_ste.h.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13 
14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 	u32 crc = crc32(0, input_data, length);
17 
18 	return (__force u32)htonl(crc);
19 }
20 
21 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
22 {
23 	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
24 }
25 
26 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
27 {
28 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
29 	u8 masked[DR_STE_SIZE_TAG] = {};
30 	u32 crc32, index;
31 	u16 bit;
32 	int i;
33 
34 	/* Don't calculate CRC if the result is predicted */
35 	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
36 		return 0;
37 
38 	/* Mask tag using byte mask, bit per byte */
39 	bit = 1 << (DR_STE_SIZE_TAG - 1);
40 	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
41 		if (htbl->byte_mask & bit)
42 			masked[i] = hw_ste->tag[i];
43 
44 		bit = bit >> 1;
45 	}
46 
47 	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
48 	index = crc32 & (htbl->chunk->num_of_entries - 1);
49 
50 	return index;
51 }
52 
53 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
54 {
55 	u16 byte_mask = 0;
56 	int i;
57 
58 	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
59 		byte_mask = byte_mask << 1;
60 		if (bit_mask[i] == 0xff)
61 			byte_mask |= 1;
62 	}
63 	return byte_mask;
64 }
65 
66 static u8 *dr_ste_get_tag(u8 *hw_ste_p)
67 {
68 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
69 
70 	return hw_ste->tag;
71 }
72 
73 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
74 {
75 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
76 
77 	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
78 }
79 
80 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
81 {
82 	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
83 	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
84 }
85 
/* Make the STE always miss: set a non-zero tag byte while clearing the
 * corresponding mask byte, so no packet can match and lookup falls
 * through to the miss address.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;	/* arbitrary non-zero marker pattern */
	hw_ste->mask[0] = 0;
}
91 
/* Set the miss address of a raw hw STE via the format-specific callback. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
97 
98 static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
99 				    struct mlx5dr_ste *ste, u64 miss_addr)
100 {
101 	u8 *hw_ste_p = ste->hw_ste;
102 
103 	ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
104 	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
105 	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
106 }
107 
/* Set the hit (next-table) address of a raw hw STE via the
 * format-specific callback.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
113 
114 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
115 {
116 	u32 index = ste - ste->htbl->ste_arr;
117 
118 	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
119 }
120 
121 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
122 {
123 	u32 index = ste - ste->htbl->ste_arr;
124 
125 	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
126 }
127 
128 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
129 {
130 	u32 index = ste - ste->htbl->ste_arr;
131 
132 	return &ste->htbl->miss_list[index];
133 }
134 
135 static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
136 				   struct mlx5dr_ste *ste,
137 				   struct mlx5dr_ste_htbl *next_htbl)
138 {
139 	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
140 	u8 *hw_ste = ste->hw_ste;
141 
142 	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
143 	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
144 	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
145 
146 	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
147 }
148 
149 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
150 				u8 ste_location)
151 {
152 	return ste_location == nic_matcher->num_of_builders;
153 }
154 
155 /* Replace relevant fields, except of:
156  * htbl - keep the origin htbl
157  * miss_list + list - already took the src from the list.
158  * icm_addr/mr_addr - depends on the hosting table.
159  *
160  * Before:
161  * | a | -> | b | -> | c | ->
162  *
163  * After:
164  * | a | -> | c | ->
165  * While the data that was in b copied to a.
166  */
167 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
168 {
169 	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
170 	dst->next_htbl = src->next_htbl;
171 	if (dst->next_htbl)
172 		dst->next_htbl->pointing_ste = dst;
173 
174 	dst->refcount = src->refcount;
175 }
176 
/* Free ste which is the head and the only one in miss_list.
 * The slot is not released; instead it is rewritten as an always-miss
 * entry pointing at the matcher's end-anchor, and the full-size STE is
 * queued for sending to HW.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	/* Miss should fall through to the matcher's end-anchor table */
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	/* Sync the in-memory reduced copy with what HW will receive */
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
211 
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * The contents of next_ste are copied over ste so the head slot stays
 * occupied, and next_ste's hosting table is released.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Build the full 64-byte STE: the reduced part copied from ste,
	 * plus the bit mask taken from the matching builder.
	 */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
259 
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * Unlinks @ste by making prev_ste's miss address bypass it, then queues
 * only prev_ste's control section for sending to HW.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* prev inherits ste's miss address, skipping ste in the chain */
	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
288 
/* Free @ste, fixing up its miss list and pushing the resulting STE
 * updates to HW. The hosting table's refcount is dropped unless the
 * head slot was recycled to hold the next list member.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	/* Collision statistics are tracked on the head's table */
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* The head slot now holds next_ste's data, so the
			 * origin table keeps its reference.
			 */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
354 
355 bool mlx5dr_ste_equal_tag(void *src, void *dst)
356 {
357 	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
358 	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
359 
360 	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
361 }
362 
363 void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
364 					  u8 *hw_ste,
365 					  struct mlx5dr_ste_htbl *next_htbl)
366 {
367 	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
368 
369 	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
370 }
371 
/* Give the STE format a chance to fix up the buffer before it is sent
 * to HW; optional callback, not all formats implement it.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
378 
379 /* Init one ste as a pattern for ste data array */
380 void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
381 				  u16 gvmi,
382 				  enum mlx5dr_domain_nic_type nic_type,
383 				  struct mlx5dr_ste_htbl *htbl,
384 				  u8 *formatted_ste,
385 				  struct mlx5dr_htbl_connect_info *connect_info)
386 {
387 	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
388 	struct mlx5dr_ste ste = {};
389 
390 	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
391 	ste.hw_ste = formatted_ste;
392 
393 	if (connect_info->type == CONNECT_HIT)
394 		dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
395 	else
396 		dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
397 }
398 
/* Build a formatted pattern STE for @htbl and write the whole table to
 * HW with it. Returns 0 on success or a negative errno from the send.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
416 
/* Allocate and connect the next hash table in the rule chain, unless
 * @ste is the last STE of the rule (then nothing is needed).
 * On success @ste and @cur_hw_ste point at the new table.
 * Returns 0, -ENOMEM on allocation failure, or -ENOENT if the table
 * could not be written to HW.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The new table inherits lookup type and mask from the
		 * current STE's "next" fields.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
466 
/* Allocate a hash table backed by an ICM chunk from @pool and
 * initialize every STE slot and miss list. Returns NULL on failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Only the reduced STE is kept in memory per entry */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
	}

	htbl->chunk_size = chunk_size;
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
508 
/* Free @htbl and its ICM chunk; fails with -EBUSY while the table is
 * still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
518 
/* Encode TX actions into @hw_ste_arr via the format-specific callback;
 * @added_stes is updated with any extra STEs the encoding required.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
529 
/* Encode RX actions into @hw_ste_arr via the format-specific callback;
 * @added_stes is updated with any extra STEs the encoding required.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
540 
541 const struct mlx5dr_ste_action_modify_field *
542 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
543 {
544 	const struct mlx5dr_ste_action_modify_field *hw_field;
545 
546 	if (sw_field >= ste_ctx->modify_field_arr_sz)
547 		return NULL;
548 
549 	hw_field = &ste_ctx->modify_field_arr[sw_field];
550 	if (!hw_field->end && !hw_field->start)
551 		return NULL;
552 
553 	return hw_field;
554 }
555 
/* Encode a modify-header SET action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
566 
/* Encode a modify-header ADD action into @hw_action via the
 * format-specific callback.
 */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
577 
/* Encode a modify-header COPY action (src field -> dst field) into
 * @hw_action via the format-specific callback.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
590 
/* Build the HW action list for an L3 decap with reformat header @data.
 * Returns -EINVAL for unsupported header sizes, otherwise the
 * format-specific callback's result; @used_hw_action_num is set to the
 * number of actions consumed.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
604 
/* Validate the matcher mask before building STEs. Only runs the MISC
 * checks when no value is given (i.e. when validating a matcher, not a
 * rule): partial masks on source_port/source_eswitch_owner_vhca_id are
 * rejected. Returns 0 or -EINVAL.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
626 
/* Build the chain of STEs for a rule into @ste_arr (one DR_STE_SIZE
 * slot per builder): init each STE, set its bit mask, fill the tag from
 * @value, and link consecutive STEs. Returns 0 or a negative errno.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
670 
/* Read field @fld from the mlx5_ifc layout at @p and, when @clear is
 * set, zero it in place; evaluates to the value read.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})
678 
/* Copy @len bytes from @from to @to and, when @clear is set, zero the
 * source region afterwards.
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
686 
/* Unpack the fte_match_set_misc fields of @mask into @spec; when @clr
 * is set each consumed field is zeroed in the source buffer.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
738 
/* Unpack the fte_match_set_lyr_2_4 (L2-L4 header) fields of @mask into
 * @spec; when @clr is set each consumed field is zeroed in the source.
 * IPv6 addresses are copied as raw big-endian words and split into four
 * host-order u32 parts.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
789 
/* Unpack the fte_match_set_misc2 (MPLS + metadata register) fields of
 * @mask into @spec; when @clr is set each field is zeroed in the source.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
834 
/* Unpack the fte_match_set_misc3 (TCP seq/ack, VXLAN-GPE, ICMP, GTP-U)
 * fields of @mask into @spec; when @clr is set each field is zeroed in
 * the source.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
864 
/* Unpack the fte_match_set_misc4 (programmable sample fields) of @mask
 * into @spec; when @clr is set each field is zeroed in the source.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
884 
885 static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
886 {
887 	spec->macsec_tag_0 =
888 		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
889 	spec->macsec_tag_1 =
890 		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
891 	spec->macsec_tag_2 =
892 		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
893 	spec->macsec_tag_3 =
894 		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
895 	spec->tunnel_header_0 =
896 		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
897 	spec->tunnel_header_1 =
898 		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
899 	spec->tunnel_header_2 =
900 		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
901 	spec->tunnel_header_3 =
902 		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
903 }
904 
905 void mlx5dr_ste_copy_param(u8 match_criteria,
906 			   struct mlx5dr_match_param *set_param,
907 			   struct mlx5dr_match_parameters *mask,
908 			   bool clr)
909 {
910 	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
911 	u8 *data = (u8 *)mask->match_buf;
912 	size_t param_location;
913 	void *buff;
914 
915 	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
916 		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
917 			memcpy(tail_param, data, mask->match_sz);
918 			buff = tail_param;
919 		} else {
920 			buff = mask->match_buf;
921 		}
922 		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
923 	}
924 	param_location = sizeof(struct mlx5dr_match_spec);
925 
926 	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
927 		if (mask->match_sz < param_location +
928 		    sizeof(struct mlx5dr_match_misc)) {
929 			memcpy(tail_param, data + param_location,
930 			       mask->match_sz - param_location);
931 			buff = tail_param;
932 		} else {
933 			buff = data + param_location;
934 		}
935 		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
936 	}
937 	param_location += sizeof(struct mlx5dr_match_misc);
938 
939 	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
940 		if (mask->match_sz < param_location +
941 		    sizeof(struct mlx5dr_match_spec)) {
942 			memcpy(tail_param, data + param_location,
943 			       mask->match_sz - param_location);
944 			buff = tail_param;
945 		} else {
946 			buff = data + param_location;
947 		}
948 		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
949 	}
950 	param_location += sizeof(struct mlx5dr_match_spec);
951 
952 	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
953 		if (mask->match_sz < param_location +
954 		    sizeof(struct mlx5dr_match_misc2)) {
955 			memcpy(tail_param, data + param_location,
956 			       mask->match_sz - param_location);
957 			buff = tail_param;
958 		} else {
959 			buff = data + param_location;
960 		}
961 		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
962 	}
963 
964 	param_location += sizeof(struct mlx5dr_match_misc2);
965 
966 	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
967 		if (mask->match_sz < param_location +
968 		    sizeof(struct mlx5dr_match_misc3)) {
969 			memcpy(tail_param, data + param_location,
970 			       mask->match_sz - param_location);
971 			buff = tail_param;
972 		} else {
973 			buff = data + param_location;
974 		}
975 		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
976 	}
977 
978 	param_location += sizeof(struct mlx5dr_match_misc3);
979 
980 	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
981 		if (mask->match_sz < param_location +
982 		    sizeof(struct mlx5dr_match_misc4)) {
983 			memcpy(tail_param, data + param_location,
984 			       mask->match_sz - param_location);
985 			buff = tail_param;
986 		} else {
987 			buff = data + param_location;
988 		}
989 		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
990 	}
991 
992 	param_location += sizeof(struct mlx5dr_match_misc4);
993 
994 	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
995 		if (mask->match_sz < param_location +
996 		    sizeof(struct mlx5dr_match_misc5)) {
997 			memcpy(tail_param, data + param_location,
998 			       mask->match_sz - param_location);
999 			buff = tail_param;
1000 		} else {
1001 			buff = data + param_location;
1002 		}
1003 		dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
1004 	}
1005 }
1006 
1007 void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
1008 				     struct mlx5dr_ste_build *sb,
1009 				     struct mlx5dr_match_param *mask,
1010 				     bool inner, bool rx)
1011 {
1012 	sb->rx = rx;
1013 	sb->inner = inner;
1014 	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
1015 }
1016 
1017 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
1018 				      struct mlx5dr_ste_build *sb,
1019 				      struct mlx5dr_match_param *mask,
1020 				      bool inner, bool rx)
1021 {
1022 	sb->rx = rx;
1023 	sb->inner = inner;
1024 	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
1025 }
1026 
1027 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
1028 				      struct mlx5dr_ste_build *sb,
1029 				      struct mlx5dr_match_param *mask,
1030 				      bool inner, bool rx)
1031 {
1032 	sb->rx = rx;
1033 	sb->inner = inner;
1034 	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
1035 }
1036 
1037 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
1038 					  struct mlx5dr_ste_build *sb,
1039 					  struct mlx5dr_match_param *mask,
1040 					  bool inner, bool rx)
1041 {
1042 	sb->rx = rx;
1043 	sb->inner = inner;
1044 	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
1045 }
1046 
1047 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
1048 				 struct mlx5dr_ste_build *sb,
1049 				 struct mlx5dr_match_param *mask,
1050 				 bool inner, bool rx)
1051 {
1052 	sb->rx = rx;
1053 	sb->inner = inner;
1054 	ste_ctx->build_eth_l2_src_init(sb, mask);
1055 }
1056 
1057 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
1058 				 struct mlx5dr_ste_build *sb,
1059 				 struct mlx5dr_match_param *mask,
1060 				 bool inner, bool rx)
1061 {
1062 	sb->rx = rx;
1063 	sb->inner = inner;
1064 	ste_ctx->build_eth_l2_dst_init(sb, mask);
1065 }
1066 
1067 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
1068 				 struct mlx5dr_ste_build *sb,
1069 				 struct mlx5dr_match_param *mask, bool inner, bool rx)
1070 {
1071 	sb->rx = rx;
1072 	sb->inner = inner;
1073 	ste_ctx->build_eth_l2_tnl_init(sb, mask);
1074 }
1075 
1076 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1077 				       struct mlx5dr_ste_build *sb,
1078 				       struct mlx5dr_match_param *mask,
1079 				       bool inner, bool rx)
1080 {
1081 	sb->rx = rx;
1082 	sb->inner = inner;
1083 	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
1084 }
1085 
1086 void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
1087 				     struct mlx5dr_ste_build *sb,
1088 				     struct mlx5dr_match_param *mask,
1089 				     bool inner, bool rx)
1090 {
1091 	sb->rx = rx;
1092 	sb->inner = inner;
1093 	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
1094 }
1095 
/* Tag-build callback for the catch-all STE: nothing is matched on, so
 * there is nothing to write into @tag and the build trivially succeeds.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1102 
1103 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
1104 {
1105 	sb->rx = rx;
1106 	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
1107 	sb->byte_mask = 0;
1108 	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
1109 }
1110 
1111 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
1112 			   struct mlx5dr_ste_build *sb,
1113 			   struct mlx5dr_match_param *mask,
1114 			   bool inner, bool rx)
1115 {
1116 	sb->rx = rx;
1117 	sb->inner = inner;
1118 	ste_ctx->build_mpls_init(sb, mask);
1119 }
1120 
1121 void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
1122 			      struct mlx5dr_ste_build *sb,
1123 			      struct mlx5dr_match_param *mask,
1124 			      bool inner, bool rx)
1125 {
1126 	sb->rx = rx;
1127 	sb->inner = inner;
1128 	ste_ctx->build_tnl_gre_init(sb, mask);
1129 }
1130 
1131 void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
1132 					struct mlx5dr_ste_build *sb,
1133 					struct mlx5dr_match_param *mask,
1134 					struct mlx5dr_cmd_caps *caps,
1135 					bool inner, bool rx)
1136 {
1137 	sb->rx = rx;
1138 	sb->inner = inner;
1139 	sb->caps = caps;
1140 	return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
1141 }
1142 
1143 void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
1144 					struct mlx5dr_ste_build *sb,
1145 					struct mlx5dr_match_param *mask,
1146 					struct mlx5dr_cmd_caps *caps,
1147 					bool inner, bool rx)
1148 {
1149 	sb->rx = rx;
1150 	sb->inner = inner;
1151 	sb->caps = caps;
1152 	return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
1153 }
1154 
1155 void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
1156 			   struct mlx5dr_ste_build *sb,
1157 			   struct mlx5dr_match_param *mask,
1158 			   struct mlx5dr_cmd_caps *caps,
1159 			   bool inner, bool rx)
1160 {
1161 	sb->rx = rx;
1162 	sb->inner = inner;
1163 	sb->caps = caps;
1164 	ste_ctx->build_icmp_init(sb, mask);
1165 }
1166 
1167 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
1168 				      struct mlx5dr_ste_build *sb,
1169 				      struct mlx5dr_match_param *mask,
1170 				      bool inner, bool rx)
1171 {
1172 	sb->rx = rx;
1173 	sb->inner = inner;
1174 	ste_ctx->build_general_purpose_init(sb, mask);
1175 }
1176 
1177 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
1178 				  struct mlx5dr_ste_build *sb,
1179 				  struct mlx5dr_match_param *mask,
1180 				  bool inner, bool rx)
1181 {
1182 	sb->rx = rx;
1183 	sb->inner = inner;
1184 	ste_ctx->build_eth_l4_misc_init(sb, mask);
1185 }
1186 
1187 void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
1188 				    struct mlx5dr_ste_build *sb,
1189 				    struct mlx5dr_match_param *mask,
1190 				    bool inner, bool rx)
1191 {
1192 	sb->rx = rx;
1193 	sb->inner = inner;
1194 	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
1195 }
1196 
1197 void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
1198 				 struct mlx5dr_ste_build *sb,
1199 				 struct mlx5dr_match_param *mask,
1200 				 bool inner, bool rx)
1201 {
1202 	sb->rx = rx;
1203 	sb->inner = inner;
1204 	ste_ctx->build_tnl_geneve_init(sb, mask);
1205 }
1206 
1207 void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
1208 					 struct mlx5dr_ste_build *sb,
1209 					 struct mlx5dr_match_param *mask,
1210 					 struct mlx5dr_cmd_caps *caps,
1211 					 bool inner, bool rx)
1212 {
1213 	sb->rx = rx;
1214 	sb->caps = caps;
1215 	sb->inner = inner;
1216 	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
1217 }
1218 
1219 void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
1220 					       struct mlx5dr_ste_build *sb,
1221 					       struct mlx5dr_match_param *mask,
1222 					       struct mlx5dr_cmd_caps *caps,
1223 					       bool inner, bool rx)
1224 {
1225 	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
1226 		return;
1227 
1228 	sb->rx = rx;
1229 	sb->caps = caps;
1230 	sb->inner = inner;
1231 	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
1232 }
1233 
1234 void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
1235 			       struct mlx5dr_ste_build *sb,
1236 			       struct mlx5dr_match_param *mask,
1237 			       bool inner, bool rx)
1238 {
1239 	sb->rx = rx;
1240 	sb->inner = inner;
1241 	ste_ctx->build_tnl_gtpu_init(sb, mask);
1242 }
1243 
1244 void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1245 					     struct mlx5dr_ste_build *sb,
1246 					     struct mlx5dr_match_param *mask,
1247 					     struct mlx5dr_cmd_caps *caps,
1248 					     bool inner, bool rx)
1249 {
1250 	sb->rx = rx;
1251 	sb->caps = caps;
1252 	sb->inner = inner;
1253 	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
1254 }
1255 
1256 void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1257 					     struct mlx5dr_ste_build *sb,
1258 					     struct mlx5dr_match_param *mask,
1259 					     struct mlx5dr_cmd_caps *caps,
1260 					     bool inner, bool rx)
1261 {
1262 	sb->rx = rx;
1263 	sb->caps = caps;
1264 	sb->inner = inner;
1265 	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
1266 }
1267 
1268 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
1269 				 struct mlx5dr_ste_build *sb,
1270 				 struct mlx5dr_match_param *mask,
1271 				 bool inner, bool rx)
1272 {
1273 	sb->rx = rx;
1274 	sb->inner = inner;
1275 	ste_ctx->build_register_0_init(sb, mask);
1276 }
1277 
1278 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
1279 				 struct mlx5dr_ste_build *sb,
1280 				 struct mlx5dr_match_param *mask,
1281 				 bool inner, bool rx)
1282 {
1283 	sb->rx = rx;
1284 	sb->inner = inner;
1285 	ste_ctx->build_register_1_init(sb, mask);
1286 }
1287 
1288 void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
1289 				   struct mlx5dr_ste_build *sb,
1290 				   struct mlx5dr_match_param *mask,
1291 				   struct mlx5dr_domain *dmn,
1292 				   bool inner, bool rx)
1293 {
1294 	/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
1295 	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
1296 
1297 	sb->rx = rx;
1298 	sb->dmn = dmn;
1299 	sb->inner = inner;
1300 	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
1301 }
1302 
1303 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
1304 				    struct mlx5dr_ste_build *sb,
1305 				    struct mlx5dr_match_param *mask,
1306 				    bool inner, bool rx)
1307 {
1308 	sb->rx = rx;
1309 	sb->inner = inner;
1310 	ste_ctx->build_flex_parser_0_init(sb, mask);
1311 }
1312 
1313 void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
1314 				    struct mlx5dr_ste_build *sb,
1315 				    struct mlx5dr_match_param *mask,
1316 				    bool inner, bool rx)
1317 {
1318 	sb->rx = rx;
1319 	sb->inner = inner;
1320 	ste_ctx->build_flex_parser_1_init(sb, mask);
1321 }
1322 
1323 void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
1324 				     struct mlx5dr_ste_build *sb,
1325 				     struct mlx5dr_match_param *mask,
1326 				     bool inner, bool rx)
1327 {
1328 	sb->rx = rx;
1329 	sb->inner = inner;
1330 	ste_ctx->build_tnl_header_0_1_init(sb, mask);
1331 }
1332 
/* STE context implementation per HW steering format version; indexed by
 * the device-reported MLX5_STEERING_FORMAT_* value.
 */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1337 
1338 struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
1339 {
1340 	if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
1341 		return NULL;
1342 
1343 	return mlx5dr_ste_ctx_arr[version];
1344 }
1345