// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

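/* Upper bound on the STE chain of a single rule: the longest match chain
 * plus the extra STEs that may be needed to carry the rule's actions.
 */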
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)

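/* Chain a new STE at the tail of a hash bucket's miss list: the previous
 * tail's miss address is pointed at the new entry, so only the control part
 * (DR_STE_SIZE_CTRL) of the old tail needs to be re-sent to HW.
 */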
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, last_ste->hw_ste,
						  ste_info_last, send_list, true);

	return 0;
}

static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;
	u64 icm_addr;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}

static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}

static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy the data to the STE: either the reduced size or only the
	 * control part; the last 16B (the mask) were already written to HW.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);
	kfree(ste_info);
	return ret;
}

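/* Flush all pending STE writes to HW. Rule creation passes is_reverse=true
 * so the list is written last-to-first, i.e. a new STE is written to HW
 * before the earlier STE that points to it is updated.
 */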
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
			return ste;
	}

	return NULL;
}

static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous tail of the miss list */
	ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
					  mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed updating duplicate entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link the old STE's rule to the new STE */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}

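/* Copy a single used STE into the enlarged hash table: rebuild its bit mask
 * from the matcher, copy its control and tag, rehash it into the new table,
 * and fall back to a collision entry if the new slot is already taken.
 */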
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	u64 icm_addr;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}

static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			goto clean_copy;
	}

clean_copy:
	return err;
}

static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to HW is done in the regular order of rehash_table_send_list,
	 * so that the original data is written before the miss addresses of
	 * collision entries, if any.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor; an anchor's size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to call mlx5dr_ste_set_hit_addr on the hw_ste here
		 * (48B long) since it only modifies the first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->ste_arr[0].hw_ste,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     cur_htbl->pointing_ste->hw_ste,
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, ste_to_update->hw_ste, ste_info,
						  update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}

static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk->size)
		return NULL; /* Skip rehash, we're already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}

static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
					miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	kfree(ste_info);
	return NULL;
}

static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}

void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Updating the rule member is usually done for the last STE, or
	 * during rule creation to recover from a mid-creation failure
	 * (this is what the force flag is for).
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}

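/* All STEs on a miss list share the same pointing STE, which is stored on
 * the hash table of the list's first entry.
 */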
static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *first_ste;

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				     struct mlx5dr_ste, miss_list_node);

	return first_ste->htbl->pointing_ste;
}

int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	bool first = false;

	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	/* Iterate from last to first */
	while (!first) {
		first = curr_ste->ste_chain_location == 1;
		ste_arr[*num_of_stes] = curr_ste;
		*num_of_stes += 1;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);
	}

	return 0;
}

static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}

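/* Population count of the byte mask (Kernighan's loop, clearing the lowest
 * set bit each iteration); equivalent to hweight16().
 */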
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}

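/* Grow the hash table only when all of the following hold: the ICM size cap
 * was not reached, this table type may grow, the byte mask spans more bits
 * than the current table log-size can spread, and both the collisions and
 * the non-colliding entries passed the increase threshold.
 */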
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}

static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the actions fit
	 *    in the match STEs.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz, new STEs were
	 *    added to support the actions.
	 */

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* When freeing an STE we traverse its miss list, so add this
		 * STE to the list
		 */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}

static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	u64 icm_addr;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}

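/* Insert one STE of the rule chain into cur_htbl: hash the entry, then
 * either take an empty slot, reuse an identical STE from the miss list,
 * enlarge the table once and retry (the "again" label), or append a new
 * collision entry.
 */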
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If this is not the last STE in the chain, reuse the
			 * matched STE. If it is the last one and has the same
			 * tag, all the previous STEs matched as well, meaning
			 * this rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to enlarge the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}

static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	/* Check if this nic rule was actually created, or was it skipped
	 * and only the other type of the RX/TX nic rule was created.
	 */
	if (!nic_rule->last_rule_ste)
		return 0;

	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);

	nic_rule->nic_matcher->rules--;
	if (!nic_rule->nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
						   nic_rule->nic_matcher);

	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}

static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mlx5dr_dbg_rule_del(rule);

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}

static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}

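/* On an FDB domain a rule is normally installed on both the RX and TX
 * sides. Skip the side that can never match: e.g. an RX rule whose matched
 * source port is a local vport rather than the uplink, or a side ruled out
 * by the flow_source hint.
 */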
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
			return true;

		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}

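/* Create the RX or TX part of a rule: select the STE builders, build the
 * match and action STE array, insert the STEs branch by branch into the
 * matcher's hash tables, and finally flush all pending writes to HW.
 */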
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr)
		return -ENOMEM;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs and build a dr_ste for each. The loop
	 * covers only the match builders, whose count is less than or equal
	 * to the number of STEs, since actions may live in additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed applying actions\n");
		goto free_rule;
	}

	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}

remove_from_nic_tbl:
	mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);
	kfree(hw_ste_arr);
	return ret;
}

static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param since it is consumed during the first nic_rule
	 * insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}

static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	INIT_LIST_HEAD(&rule->dbg_node);
	mlx5dr_dbg_rule_add(rule);
	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}

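/**
 * mlx5dr_rule_create - Create a SW steering rule.
 * @matcher: matcher to add the rule to; its refcount is held for the
 *           lifetime of the rule.
 * @value: match parameter values, verified against the matcher's mask.
 * @num_actions: number of elements in @actions.
 * @actions: actions to apply when the rule matches.
 * @flow_source: MLX5_FLOW_CONTEXT_FLOW_SOURCE_* hint; on FDB domains it is
 *               used to skip the RX or TX side that can never match.
 *
 * Return: the created rule, or NULL on failure.
 */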
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}

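/**
 * mlx5dr_rule_destroy - Destroy a rule and drop its matcher reference.
 * @rule: rule to destroy.
 *
 * Return: 0 on success, a negative errno value otherwise.
 */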
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}