// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_ste.h"

struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
/* Compute CRC32 over the tag and return it in big-endian (network)
 * byte order, as consumed by the hash index calculation below.
 */
static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
	u32 crc = crc32(0, input_data, length);

	return (__force u32)htonl(crc);
}

bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}

u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
	u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 masked[DR_STE_SIZE_TAG] = {};
	u32 crc32, index;
	u16 bit;
	int i;

	/* Don't calculate CRC if the result is predictable */
	if (num_entries == 1 || htbl->byte_mask == 0)
		return 0;

	/* Mask tag using byte mask, bit per byte */
	bit = 1 << (DR_STE_SIZE_TAG - 1);
	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
		if (htbl->byte_mask & bit)
			masked[i] = hw_ste->tag[i];

		bit = bit >> 1;
	}

	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
	index = crc32 & (num_entries - 1);

	return index;
}
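
/* Worked example (illustrative, assuming DR_STE_SIZE_TAG == 16): with a
 * 16-entry table and byte_mask == 0x8000, only tag[0] survives the
 * masking loop, so the index depends on tag[0] alone:
 *
 *	index = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG) & (16 - 1);
 *
 * num_entries is always a power of two, which is what makes the
 * "crc32 & (num_entries - 1)" modulo shortcut valid.
 */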

u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
	u16 byte_mask = 0;
	int i;

	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
		byte_mask = byte_mask << 1;
		if (bit_mask[i] == 0xff)
			byte_mask |= 1;
	}
	return byte_mask;
}
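
/* Example (illustrative, assuming DR_STE_SIZE_MASK == 16): a bit_mask
 * whose first two bytes are 0xff and the rest zero folds into
 * byte_mask == 0xc000, one bit per fully-masked byte, MSB first.
 * This is the bit-per-byte encoding consumed by
 * mlx5dr_ste_calc_hash_index() above.
 */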

static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	return hw_ste->tag;
}

void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}

static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}

static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}

bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
				 u8 *hw_ste_p)
{
	if (!ste_ctx->is_miss_addr_set)
		return false;

	/* check if miss address is already set for this type of STE */
	return ste_ctx->is_miss_addr_set(hw_ste_p);
}

void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}

static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}

void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}

u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
	u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return base_icm_addr + DR_STE_SIZE * index;
}

u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
}

u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
{
	u64 index = ste - ste->htbl->chunk->ste_arr;

	return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}

struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return &ste->htbl->chunk->miss_list[index];
}
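
/* The getters above share one index: the chunk keeps parallel arrays,
 * so an STE's position in ste_arr also locates its HW image and miss
 * list (sketch, assuming a valid ste/htbl pair):
 *
 *	u32 i  = ste - ste->htbl->chunk->ste_arr;
 *	icm    = chunk_icm_addr + DR_STE_SIZE * i;
 *	hw_ste = chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * i;
 *	miss   = &chunk->miss_list[i];
 */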

static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   u8 *hw_ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));

	dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}

bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}

/* Replace relevant fields, except for:
 * htbl - keep the original htbl
 * miss_list + list - the src was already taken from the list.
 * icm_addr/mr_addr - depend on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * where the data that was in b is copied into a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
	       DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}

/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	u64 miss_addr;

	miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);

	/* Use a temp ste because dr_ste_always_miss_addr
	 * touches the bit_mask area, which doesn't exist in ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
	memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write the full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}

/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove next_ste from the miss_list before the copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Copy the reduced STE into a full-sized (DR_STE_SIZE) buffer */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Release the htbl that contains the next_ste.
	 * The origin htbl stays with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}

/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
	ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  mlx5dr_ste_get_hw_ste(prev_ste),
						  ste_info, send_ste_list,
						  true /* Copy data */);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
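
/* In HW terms the function above is a singly-linked-list unlink: the
 * predecessor inherits the removed STE's miss address, conceptually
 *
 *	prev->miss_addr = ste->miss_addr;
 *
 * Only DR_STE_SIZE_CTRL bytes of prev_ste are sent, since the miss
 * address lives in the control portion of the STE.
 */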

void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}

bool mlx5dr_ste_equal_tag(void *src, void *dst)
{
	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;

	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}

void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
	u32 num_entries =
		mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);

	ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}

void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}

/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	u8 tmp_hw_ste[DR_STE_SIZE] = {0};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);

	/* Use a temp ste because dr_ste_always_miss_addr/hit_htbl
	 * touches the bit_mask area, which doesn't exist in ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
				       connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
					connect_info->miss_icm_addr);
	memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}

int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}

int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr =
			mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}

struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	u32 num_entries;
	int i;

	htbl = mlx5dr_icm_pool_alloc_htbl(pool);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->refcount = 0;
	htbl->pointing_ste = NULL;
	htbl->ctrl.num_of_valid_entries = 0;
	htbl->ctrl.num_of_collisions = 0;
	num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);

	for (i = 0; i < num_entries; i++) {
		struct mlx5dr_ste *ste = &chunk->ste_arr[i];

		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&chunk->miss_list[i]);
	}

	return htbl;

out_free_htbl:
	mlx5dr_icm_pool_free_htbl(pool, htbl);
	return NULL;
}

int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;

	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	mlx5dr_icm_pool_free_htbl(pool, htbl);

	return 0;
}
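
/* Typical alloc/free pairing (sketch; the get/put helpers and
 * DR_CHUNK_SIZE_1 are assumed from the surrounding mlx5dr code,
 * error handling elided):
 *
 *	htbl = mlx5dr_ste_htbl_alloc(pool, DR_CHUNK_SIZE_1, lu_type, 0);
 *	mlx5dr_htbl_get(htbl);
 *	...
 *	mlx5dr_htbl_put(htbl);
 *	mlx5dr_ste_htbl_free(htbl);
 *
 * Free fails with -EBUSY while the refcount is still non-zero, so a
 * put must balance every get before the table can be destroyed.
 */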

void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}

void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}

const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
{
	const struct mlx5dr_ste_action_modify_field *hw_field;

	if (sw_field >= ste_ctx->modify_field_arr_sz)
		return NULL;

	hw_field = &ste_ctx->modify_field_arr[sw_field];
	if (!hw_field->end && !hw_field->start)
		return NULL;

	return hw_field;
}

void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}

void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}

void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}

int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frames are supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}

static int
dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;
	u32 chunk_size;
	int ret;

	chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));

	/* HW modify action index granularity is at least 64B */
	chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);

	action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
							chunk_size);
	if (!action->rewrite->chunk)
		return -ENOMEM;

	action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
				  dmn->info.caps.hdr_modify_icm_addr) /
				 DR_ACTION_CACHE_LINE_SIZE;

	ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
	if (ret)
		goto free_chunk;

	return 0;

free_chunk:
	mlx5dr_icm_free_chunk(action->rewrite->chunk);
	return -ENOMEM;
}
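
/* Index math above, illustrated: with DR_ACTION_CACHE_LINE_SIZE == 64,
 * a chunk at hdr_modify_icm_addr + 0x1000 gets index 0x40, i.e. the
 * rewrite index counts 64B cache lines from the modify-header ICM
 * base, matching the 64B granularity noted in the comment above.
 */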

static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
{
	mlx5dr_icm_free_chunk(action->rewrite->chunk);
}

int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;

	if (mlx5dr_domain_is_support_ptrn_arg(dmn))
		return dmn->ste_ctx->alloc_modify_hdr_chunk(action);

	return dr_ste_alloc_modify_hdr_chunk(action);
}

void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;

	if (mlx5dr_domain_is_support_ptrn_arg(dmn)) {
		dmn->ste_ctx->dealloc_modify_hdr_chunk(action);
		return;
	}

	dr_ste_free_modify_hdr_chunk(action);
}

static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
				       struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version) {
		if (spec->ip_version != 0xf) {
			mlx5dr_err(dmn,
				   "Partial ip_version mask with src/dst IP is not supported\n");
			return -EINVAL;
		}
	} else if (spec->ethertype != 0xffff &&
		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
		mlx5dr_err(dmn,
			   "Partial/no ethertype mask with src/dst IP is not supported\n");
		return -EINVAL;
	}

	return 0;
}

int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}

int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
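
/* Chain produced above, illustrated for three builders:
 *
 *	STE[0]: lu_type[0], tag[0], next_lu_type = lu_type[1]
 *	STE[1]: lu_type[1], tag[1], next_lu_type = lu_type[2]
 *	STE[2]: lu_type[2], tag[2]  (hit/miss set later by the caller)
 *
 * Each STE advertises its successor's lookup type and byte mask so
 * HW can walk the whole chain on a single packet lookup.
 */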

#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})

#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
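
/* With clear == true these helpers read a field and then zero it in
 * the source mask, so any bit still set after the copy marks a field
 * SW steering did not consume. Illustrative expansion of
 * IFC_GET_CLR(fte_match_set_misc, mask, source_port, true):
 *
 *	u32 __t = MLX5_GET(fte_match_set_misc, mask, source_port);
 *	MLX5_SET(fte_match_set_misc, mask, source_port, 0);
 */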

static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}

static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}

static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}

static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}

static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}

static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}

void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data, mask->match_sz);
			buff = tail_param;
		} else {
			buff = mask->match_buf;
		}
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc2)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
	}

	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc3)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
	}

	param_location += sizeof(struct mlx5dr_match_misc3);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc4)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
	}

	param_location += sizeof(struct mlx5dr_match_misc4);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc5)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
	}
}
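
/* Layout walked above (sketch): the caller's mask buffer is the packed
 * sequence of per-criteria blocks, in this fixed order:
 *
 *	offset 0                struct mlx5dr_match_spec  (outer)
 *	+ sizeof(match_spec)    struct mlx5dr_match_misc
 *	+ sizeof(match_misc)    struct mlx5dr_match_spec  (inner)
 *	+ sizeof(match_spec)    misc2, then misc3, misc4, misc5
 *
 * tail_param zero-pads a short caller buffer so each copy helper can
 * always read a full block.
 */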

void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}

void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}

static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}

void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}

void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_mpls_init(sb, mask);
}

void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_gre_init(sb, mask);
}

void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}

void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}

void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_icmp_init(sb, mask);
}

void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_general_purpose_init(sb, mask);
}

void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}

void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}

void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}

void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	sb->rx = rx;
	sb->caps = caps;
	sb->inner = inner;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}

void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
					       struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       bool inner, bool rx)
{
	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
		return;

	sb->rx = rx;
	sb->caps = caps;
	sb->inner = inner;
	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}

void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}

void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->rx = rx;
	sb->caps = caps;
	sb->inner = inner;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}

void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->rx = rx;
	sb->caps = caps;
	sb->inner = inner;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}

void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_register_0_init(sb, mask);
}

void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_register_1_init(sb, mask);
}

void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->rx = rx;
	sb->dmn = dmn;
	sb->inner = inner;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}

void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}

void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}

void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_header_0_1_init(sb, mask);
}

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return mlx5dr_ste_get_ctx_v0();
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return mlx5dr_ste_get_ctx_v1();
	else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
		return mlx5dr_ste_get_ctx_v2();

	return NULL;
}
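
/* Usage sketch (assumed caller flow, e.g. during domain init): pick
 * the STE context once from the reported steering format version:
 *
 *	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
 *	if (!dmn->ste_ctx)
 *		return -EOPNOTSUPP;
 */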