1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 #define DR_RULE_MAX_STES_OPTIMIZED 5
7 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
8 
/* Append @new_last_ste after the current tail of @miss_list (a collision
 * chain) and queue the HW update that re-points the old tail's miss
 * address at the new entry.
 *
 * Only DR_STE_SIZE_CTRL bytes of the old tail are queued for writing,
 * since just its control section (holding the miss address) changed.
 * The write itself is deferred: a ste_send_info is appended to
 * @send_list for the caller to flush.
 *
 * Return: 0 on success, -ENOMEM if the send-info allocation fails
 * (in which case neither the list nor the shadow STE is touched).
 */
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
				       enum mlx5dr_domain_nic_type nic_type,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
	if (!ste_info_last)
		return -ENOMEM;

	/* Redirect the old tail's miss path to the new entry (shadow copy) */
	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	/* Defer the HW write of the updated control section */
	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}
37 
38 static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
39 					   struct mlx5dr_matcher_rx_tx *nic_matcher,
40 					   u8 *hw_ste)
41 {
42 	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
43 	u64 icm_addr;
44 
45 	if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
46 		return;
47 
48 	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
49 	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
50 }
51 
/* Allocate a single-entry (DR_CHUNK_SIZE_1) hash table used to host a
 * collision (or action) STE. The table uses a don't-care lookup type
 * since its one entry is reached only via a miss/hit pointer, never by
 * hashing. Also makes sure @hw_ste has a miss address (defaults to the
 * matcher's e_anchor) and takes a reference on the new table.
 *
 * Return: the table's single STE, or NULL on allocation failure.
 */
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}
78 
/* Create a collision STE chained off @orig_ste's bucket: a one-entry
 * table sharing orig_ste's chain location, pointing STE and miss-list
 * head, plus the next-chunk table the new STE will point at.
 *
 * Return: the new STE, or NULL on failure (the collision table is
 * released via mlx5dr_ste_free on the error path).
 */
static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}
112 
113 static int
114 dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
115 				      struct mlx5dr_domain *dmn)
116 {
117 	int ret;
118 
119 	list_del(&ste_info->send_list);
120 
121 	/* Copy data to ste, only reduced size or control, the last 16B (mask)
122 	 * is already written to the hw.
123 	 */
124 	if (ste_info->size == DR_STE_SIZE_CTRL)
125 		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
126 		       ste_info->data, DR_STE_SIZE_CTRL);
127 	else
128 		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
129 		       ste_info->data, DR_STE_SIZE_REDUCED);
130 
131 	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
132 				       ste_info->size, ste_info->offset);
133 	if (ret)
134 		goto out;
135 
136 out:
137 	mlx5dr_send_info_free(ste_info);
138 	return ret;
139 }
140 
141 static int dr_rule_send_update_list(struct list_head *send_ste_list,
142 				    struct mlx5dr_domain *dmn,
143 				    bool is_reverse)
144 {
145 	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
146 	int ret;
147 
148 	if (is_reverse) {
149 		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
150 						 send_ste_list, send_list) {
151 			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
152 								    dmn);
153 			if (ret)
154 				return ret;
155 		}
156 	} else {
157 		list_for_each_entry_safe(ste_info, tmp_ste_info,
158 					 send_ste_list, send_list) {
159 			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
160 								    dmn);
161 			if (ret)
162 				return ret;
163 		}
164 	}
165 
166 	return 0;
167 }
168 
169 static struct mlx5dr_ste *
170 dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
171 {
172 	struct mlx5dr_ste *ste;
173 
174 	if (list_empty(miss_list))
175 		return NULL;
176 
177 	/* Check if hw_ste is present in the list */
178 	list_for_each_entry(ste, miss_list, miss_list_node) {
179 		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
180 			return ste;
181 	}
182 
183 	return NULL;
184 }
185 
/* During rehash, place an STE that collides with @col_ste in the new
 * table: create a one-entry collision table, link it into col_ste's
 * miss list (the tail-update HW write is queued on @update_list) and
 * share col_ste's pointing STE and miss-list head.
 *
 * Return: the new collision STE, or NULL on failure (the partially
 * built STE is freed).
 */
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous from the list */
	ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					  new_ste, mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed update dup entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
222 
/* Transfer chain-level state from @cur_ste (old table) to @new_ste
 * (new table) during rehash: next-table link, chain location, refcount
 * and the rule's last-member pointer.
 */
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	/* Re-point the next table back at its (new) owner STE */
	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link old STEs rule to the new ste */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
242 
/* Migrate a single used STE from the old hash table into @new_htbl
 * during rehash: rebuild its full 64B representation (matcher mask +
 * old control/tag), re-hash it into the new table, and either occupy a
 * free slot or chain it as a collision entry. Collision entries need a
 * full-STE HW write, queued on @update_list; free-slot entries are
 * written later as part of the whole-table post-send.
 *
 * Return: the STE's new location, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		/* Free slot in the new table - take it directly */
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		/* Slot taken - chain the entry off the occupying STE */
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	/* Sync the shadow copy of the (possibly collision) STE */
	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		/* Collision entries live outside the table chunk, so they
		 * must be written to HW individually.
		 */
		ste_info = mlx5dr_send_info_alloc(dmn,
						  nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	/* Only reachable on the collision path (use_update_list == true) */
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
311 
/* Move every STE on one miss (collision) chain of the old table into
 * @new_htbl. Each migrated STE is unlinked from the old chain and the
 * old table's refcount is dropped accordingly.
 *
 * Return: 0 on success; -EINVAL (with a WARN) if a copy fails mid-way,
 * which is unrecoverable since earlier entries were already moved.
 */
static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		/* Detach the migrated entry from the old table */
		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}
342 
343 static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
344 				    struct mlx5dr_matcher_rx_tx *nic_matcher,
345 				    struct mlx5dr_ste_htbl *cur_htbl,
346 				    struct mlx5dr_ste_htbl *new_htbl,
347 				    struct list_head *update_list)
348 {
349 	struct mlx5dr_ste *cur_ste;
350 	int cur_entries;
351 	int err = 0;
352 	int i;
353 
354 	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
355 
356 	if (cur_entries < 1) {
357 		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
358 		return -EINVAL;
359 	}
360 
361 	for (i = 0; i < cur_entries; i++) {
362 		cur_ste = &cur_htbl->chunk->ste_arr[i];
363 		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
364 			continue;
365 
366 		err = dr_rule_rehash_copy_miss_list(matcher,
367 						    nic_matcher,
368 						    mlx5dr_ste_get_miss_list(cur_ste),
369 						    new_htbl,
370 						    update_list);
371 		if (err)
372 			goto clean_copy;
373 
374 		/* In order to decrease the number of allocated ste_send_info
375 		 * structs, send the current table row now.
376 		 */
377 		err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
378 		if (err) {
379 			mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
380 			goto clean_copy;
381 		}
382 	}
383 
384 clean_copy:
385 	return err;
386 }
387 
/* Grow @cur_htbl to @new_size and migrate all of its entries:
 *  1) allocate the new table and format its header STE (miss goes to
 *     the matcher's e_anchor),
 *  2) copy all used entries into it, queueing per-entry HW writes on a
 *     private list,
 *  3) post the new table to HW and flush the queued writes (forward
 *     order, so entry data lands before any collision miss pointers),
 *  4) re-point the previous hop (the s_anchor when @ste_location is 1,
 *     otherwise cur_htbl's pointing STE) at the new table; that CTRL
 *     write is queued on @update_list for the caller to send.
 *
 * Return: the new table, or NULL on failure.
 */
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* Pre-allocate the send info for step 4 so the final re-point
	 * cannot fail after the migration already happened.
	 */
	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the hw is done in regular order of rehash_table_send_list,
	 * in order to have the origin data written before the miss address of
	 * collision entries, if exists.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor, anchors size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
		 * (48B len) which works only on first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	/* Queue the re-point of the previous hop; the caller flushes it */
	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		mlx5dr_send_info_free(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	mlx5dr_send_info_free(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
509 
510 static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
511 					      struct mlx5dr_rule_rx_tx *nic_rule,
512 					      struct mlx5dr_ste_htbl *cur_htbl,
513 					      u8 ste_location,
514 					      struct list_head *update_list)
515 {
516 	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
517 	enum mlx5dr_icm_chunk_size new_size;
518 
519 	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
520 	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
521 
522 	if (new_size == cur_htbl->chunk->size)
523 		return NULL; /* Skip rehash, we already at the max size */
524 
525 	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
526 				   update_list, new_size);
527 }
528 
/* Handle a hash collision while inserting a rule STE: build a new
 * collision entry chained off @ste, append it to @miss_list (the
 * old-tail update and the full new-STE write are queued on
 * @send_list), and bump the table's collision/valid counters.
 *
 * Return: the new STE, or NULL on failure with everything rolled back.
 */
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	/* Allocate first so no failure can occur after list changes */
	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					new_ste, miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	/* Queue the full write of the new collision STE */
	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	mlx5dr_send_info_free(ste_info);
	return NULL;
}
570 
571 static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
572 {
573 	struct mlx5dr_rule_action_member *action_mem;
574 	struct mlx5dr_rule_action_member *tmp;
575 
576 	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
577 		list_del(&action_mem->list);
578 		refcount_dec(&action_mem->action->refcount);
579 		kvfree(action_mem);
580 	}
581 }
582 
583 static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
584 				      size_t num_actions,
585 				      struct mlx5dr_action *actions[])
586 {
587 	struct mlx5dr_rule_action_member *action_mem;
588 	int i;
589 
590 	for (i = 0; i < num_actions; i++) {
591 		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
592 		if (!action_mem)
593 			goto free_action_members;
594 
595 		action_mem->action = actions[i];
596 		INIT_LIST_HEAD(&action_mem->list);
597 		list_add_tail(&action_mem->list, &rule->rule_actions_list);
598 		refcount_inc(&action_mem->action->refcount);
599 	}
600 
601 	return 0;
602 
603 free_action_members:
604 	dr_rule_remove_action_members(rule);
605 	return -ENOMEM;
606 }
607 
/* Record @ste as the last member of @nic_rule when it terminates the
 * chain (no next table), or unconditionally when @force is set.
 */
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Update rule member is usually done for the last STE or during rule
	 * creation to recover from mid-creation failure (for this purpose the
	 * force flag is used)
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}
623 
624 static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
625 {
626 	struct mlx5dr_ste *first_ste;
627 
628 	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
629 				     struct mlx5dr_ste, miss_list_node);
630 
631 	return first_ste->htbl->pointing_ste;
632 }
633 
634 int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
635 					 struct mlx5dr_ste *curr_ste,
636 					 int *num_of_stes)
637 {
638 	bool first = false;
639 
640 	*num_of_stes = 0;
641 
642 	if (!curr_ste)
643 		return -ENOENT;
644 
645 	/* Iterate from last to first */
646 	while (!first) {
647 		first = curr_ste->ste_chain_location == 1;
648 		ste_arr[*num_of_stes] = curr_ste;
649 		*num_of_stes += 1;
650 		curr_ste = dr_rule_get_pointed_ste(curr_ste);
651 	}
652 
653 	return 0;
654 }
655 
/* Release all STEs belonging to @nic_rule. The chain is first gathered
 * last-to-first, then put in reverse (head-first) so each STE's
 * reference is dropped only after the STE pointing at it.
 */
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	/* Nothing to clean when there is no last STE to walk back from */
	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}
669 
/* Population count of @byte_mask (number of set bits). */
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 count;

	/* Kernighan's method: each iteration clears the lowest set bit */
	for (count = 0; byte_mask; count++)
		byte_mask &= byte_mask - 1;

	return count;
}
681 
/* Decide whether @htbl should be rehashed into a bigger table.
 * Growth is skipped when the table already reached the domain's max
 * SW ICM size, when the table is marked non-growable, or when the
 * mask's bit count can't fill a bigger table. Otherwise grow once both
 * collisions and non-colliding valid entries pass the threshold.
 */
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	/* A table larger than the number of maskable bits cannot reduce
	 * collisions further.
	 */
	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}
705 
/* Append the extra action-only STEs (indices num_of_builders ..
 * new_hw_ste_arr_sz-1 of @hw_ste_arr) to the rule chain after
 * @last_ste. Each action STE gets its own one-entry table, is linked
 * into the chain, pointed at by the previous STE's hit address, and
 * queued for a full HW write on @send_ste_list.
 *
 * Return: 0 on success, -ENOMEM on failure (the failing STE is put;
 * earlier iterations remain queued and are unwound by the caller's
 * error path via the rule's last-member chain).
 */
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
	 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
	 *    to support the action.
	 */

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		/* Chain the action STE after the current tail of the rule */
		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* While free ste we go over the miss list, so add this ste to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
							 nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	/* The final action STE terminates the rule chain */
	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}
773 
/* Occupy an unused slot @ste in @cur_htbl for a new rule branch: take a
 * table reference, start a fresh miss list, set the STE's miss address
 * (defaults to the matcher's e_anchor), create the next-chunk table and
 * queue the full STE write on @send_list.
 *
 * Return: 0 on success, -ENOMEM on failure with the slot restored to
 * its unused state.
 */
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	ste->ste_chain_location = ste_location;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	mlx5dr_send_info_free(ste_info);
clean_ste_setting:
	/* Undo the list insertion and the table reference taken above */
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}
825 
/* Insert @hw_ste into @cur_htbl at chain position @ste_location.
 * Three outcomes per hash slot:
 *  - slot unused: take it (dr_rule_handle_empty_entry);
 *  - identical tag already in the miss list: reuse that STE (a
 *    duplicate rule is only reported if this is the last STE);
 *  - slot used by a different tag: try a one-time rehash of the table
 *    (retrying insertion via the `again` label), and otherwise append
 *    a collision entry to the slot's miss list.
 * On rehash the old table is kept alive through *@put_htbl; the caller
 * releases it once the queued updates were sent.
 *
 * Return: the STE holding this chunk of the rule, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is last STE in the chain, and has the same tag
			 * it means that all the previous stes are the same,
			 * if so, this rule is duplicated.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize of the hash */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				/* Rehash failed - fall back to inserting into
				 * the original table on the next pass.
				 */
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}
911 
/* Check that bytes [s_idx, e_idx) of @value only carry bits that are
 * also set in @mask. Returns false (and logs) on the first stray bit.
 */
static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 idx = s_idx;

	while (idx < e_idx) {
		if (value[idx] & ~mask[idx]) {
			pr_info("Rule parameters contains a value not specified by mask\n");
			return false;
		}
		idx++;
	}

	return true;
}
925 
/* Validate rule @value against the matcher: the size must be non-zero,
 * u32-aligned and within DR_SZ_MATCH_PARAM; after unpacking into
 * @param, every criteria group the matcher uses is checked so the rule
 * sets no bit outside the matcher's mask. Each group is compared only
 * up to value_size, since the caller may pass a shorter buffer.
 *
 * Return: true when the rule is consistent with the matcher's mask.
 */
static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	/* Unpack the device-format buffer into the match_param layout */
	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
			return false;
		}
	}
	return true;
}
1016 
1017 static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
1018 				    struct mlx5dr_rule_rx_tx *nic_rule)
1019 {
1020 	/* Check if this nic rule was actually created, or was it skipped
1021 	 * and only the other type of the RX/TX nic rule was created.
1022 	 */
1023 	if (!nic_rule->last_rule_ste)
1024 		return 0;
1025 
1026 	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
1027 	dr_rule_clean_rule_members(rule, nic_rule);
1028 
1029 	nic_rule->nic_matcher->rules--;
1030 	if (!nic_rule->nic_matcher->rules)
1031 		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
1032 						   nic_rule->nic_matcher);
1033 
1034 	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
1035 
1036 	return 0;
1037 }
1038 
1039 static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
1040 {
1041 	dr_rule_destroy_rule_nic(rule, &rule->rx);
1042 	dr_rule_destroy_rule_nic(rule, &rule->tx);
1043 	return 0;
1044 }
1045 
1046 static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
1047 {
1048 	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
1049 
1050 	mlx5dr_dbg_rule_del(rule);
1051 
1052 	switch (dmn->type) {
1053 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
1054 		dr_rule_destroy_rule_nic(rule, &rule->rx);
1055 		break;
1056 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
1057 		dr_rule_destroy_rule_nic(rule, &rule->tx);
1058 		break;
1059 	case MLX5DR_DOMAIN_TYPE_FDB:
1060 		dr_rule_destroy_rule_fdb(rule);
1061 		break;
1062 	default:
1063 		return -EINVAL;
1064 	}
1065 
1066 	dr_rule_remove_action_members(rule);
1067 	kfree(rule);
1068 	return 0;
1069 }
1070 
1071 static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
1072 {
1073 	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
1074 		return DR_RULE_IPV6;
1075 
1076 	return DR_RULE_IPV4;
1077 }
1078 
1079 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
1080 			 enum mlx5dr_domain_nic_type nic_type,
1081 			 struct mlx5dr_match_param *mask,
1082 			 struct mlx5dr_match_param *value,
1083 			 u32 flow_source)
1084 {
1085 	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
1086 
1087 	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
1088 		return false;
1089 
1090 	if (mask->misc.source_port) {
1091 		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
1092 			return true;
1093 
1094 		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
1095 			return true;
1096 	}
1097 
1098 	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
1099 		return true;
1100 
1101 	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
1102 		return true;
1103 
1104 	return false;
1105 }
1106 
/* Create one RX/TX half of a rule: build the match and action STEs into
 * a local buffer, then walk the matcher's hash tables and materialize the
 * STE chain, sending the updates to HW in one batch.
 * Returns 0 on success (including when the rule is intentionally skipped
 * for this direction) or a negative errno on failure.
 */
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	/* Stack buffer for the common case of short STE chains; chains
	 * with more builders fall back to a heap allocation below.
	 */
	u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	bool hw_ste_arr_is_opt;
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* FDB rules may apply to only one direction; skipping is not an
	 * error, so report success without creating anything.
	 */
	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	/* Pick the STE builders matching the rule's inner/outer IP version */
	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		return ret;

	hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
	if (likely(hw_ste_arr_is_opt)) {
		hw_ste_arr = hw_ste_arr_optimized;
	} else {
		/* Room for the match STEs plus the worst-case action STEs */
		hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
				     DR_STE_SIZE, GFP_KERNEL);

		if (!hw_ste_arr)
			return -ENOMEM;
	}

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop is over only the builders which are equal or less to the
	 * number of stes, in case we have actions that lives in other stes.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		/* Each STE points at the hash table hosting the next one */
		cur_htbl = ste->next_htbl;

		/* Hold a reference per rule member; released on rule destroy */
		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed apply actions\n");
		goto free_rule;
	}
	/* Flush all pending STE writes to HW in one batch */
	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		mlx5dr_send_info_free(ste_info);
	}

remove_from_nic_tbl:
	/* Only detach the matcher if this failed rule was its sole user */
	if (!nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return ret;
}
1246 
1247 static int
1248 dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1249 			struct mlx5dr_match_param *param,
1250 			size_t num_actions,
1251 			struct mlx5dr_action *actions[])
1252 {
1253 	struct mlx5dr_match_param copy_param = {};
1254 	int ret;
1255 
1256 	/* Copy match_param since they will be consumed during the first
1257 	 * nic_rule insertion.
1258 	 */
1259 	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
1260 
1261 	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1262 				      num_actions, actions);
1263 	if (ret)
1264 		return ret;
1265 
1266 	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
1267 				      num_actions, actions);
1268 	if (ret)
1269 		goto destroy_rule_nic_rx;
1270 
1271 	return 0;
1272 
1273 destroy_rule_nic_rx:
1274 	dr_rule_destroy_rule_nic(rule, &rule->rx);
1275 	return ret;
1276 }
1277 
1278 static struct mlx5dr_rule *
1279 dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1280 		    struct mlx5dr_match_parameters *value,
1281 		    size_t num_actions,
1282 		    struct mlx5dr_action *actions[],
1283 		    u32 flow_source)
1284 {
1285 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1286 	struct mlx5dr_match_param param = {};
1287 	struct mlx5dr_rule *rule;
1288 	int ret;
1289 
1290 	if (!dr_rule_verify(matcher, value, &param))
1291 		return NULL;
1292 
1293 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1294 	if (!rule)
1295 		return NULL;
1296 
1297 	rule->matcher = matcher;
1298 	rule->flow_source = flow_source;
1299 	INIT_LIST_HEAD(&rule->rule_actions_list);
1300 
1301 	ret = dr_rule_add_action_members(rule, num_actions, actions);
1302 	if (ret)
1303 		goto free_rule;
1304 
1305 	switch (dmn->type) {
1306 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
1307 		rule->rx.nic_matcher = &matcher->rx;
1308 		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
1309 					      num_actions, actions);
1310 		break;
1311 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
1312 		rule->tx.nic_matcher = &matcher->tx;
1313 		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
1314 					      num_actions, actions);
1315 		break;
1316 	case MLX5DR_DOMAIN_TYPE_FDB:
1317 		rule->rx.nic_matcher = &matcher->rx;
1318 		rule->tx.nic_matcher = &matcher->tx;
1319 		ret = dr_rule_create_rule_fdb(rule, &param,
1320 					      num_actions, actions);
1321 		break;
1322 	default:
1323 		ret = -EINVAL;
1324 		break;
1325 	}
1326 
1327 	if (ret)
1328 		goto remove_action_members;
1329 
1330 	INIT_LIST_HEAD(&rule->dbg_node);
1331 	mlx5dr_dbg_rule_add(rule);
1332 	return rule;
1333 
1334 remove_action_members:
1335 	dr_rule_remove_action_members(rule);
1336 free_rule:
1337 	kfree(rule);
1338 	mlx5dr_err(dmn, "Failed creating rule\n");
1339 	return NULL;
1340 }
1341 
1342 struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1343 				       struct mlx5dr_match_parameters *value,
1344 				       size_t num_actions,
1345 				       struct mlx5dr_action *actions[],
1346 				       u32 flow_source)
1347 {
1348 	struct mlx5dr_rule *rule;
1349 
1350 	refcount_inc(&matcher->refcount);
1351 
1352 	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
1353 	if (!rule)
1354 		refcount_dec(&matcher->refcount);
1355 
1356 	return rule;
1357 }
1358 
1359 int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1360 {
1361 	struct mlx5dr_matcher *matcher = rule->matcher;
1362 	int ret;
1363 
1364 	ret = dr_rule_destroy_rule(rule);
1365 	if (!ret)
1366 		refcount_dec(&matcher->refcount);
1367 
1368 	return ret;
1369 }
1370