// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)

struct mlx5dr_rule_action_member {
	struct mlx5dr_action *action;
	struct list_head list;
};

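/* Append new_last_ste to the tail of the miss (collision) list: the old
 * tail's miss address is re-pointed at the new entry, and the old tail's
 * control bytes are queued on send_list to be written to HW later.
 */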
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the current last entry */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, last_ste->hw_ste,
						  ste_info_last, send_list, true);

	return 0;
}

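/* Allocate a single-entry "don't care" hash table that serves as a
 * collision entry; the given hw_ste gets the matcher's end anchor as
 * its miss address.
 */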
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->ste_arr;
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}

static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}

static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to the STE: only the reduced size or the control,
	 * since the last 16B (the mask) were already written to HW.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);
	kfree(ste_info);
	return ret;
}

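/* Flush all pending STE writes queued on send_ste_list. On rule creation
 * the list is flushed in reverse, so that each STE is written before the
 * earlier STE that points at it; during rehash the list is flushed in
 * regular order (see dr_rule_rehash_htbl).
 */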
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
			return ste;
	}

	return NULL;
}

static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous entry in the miss list */
	ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
					  mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to update duplicate entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this STE
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link the old STE's rule_mem list to the new STE */
	mlx5dr_rule_update_rule_member(cur_ste, new_ste);
	INIT_LIST_HEAD(&new_ste->rule_list);
	list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
}

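/* Copy a single STE into the new (larger) hash table during rehash:
 * rebuild the mask from the matcher, recompute the hash index in the new
 * table, and either take the free slot or chain a collision entry onto
 * the existing miss list.
 */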
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}

static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			break;
	}

	return err;
}

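/* Grow a hash table: allocate a table of new_size, copy every used STE
 * into it, write the new table to HW, and finally re-point the previous
 * STE (or the matcher anchor, for the first location) at the new table.
 */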
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to HW is done in the regular order of rehash_table_send_list,
	 * so that the original data is written before the miss addresses of
	 * collision entries, if any exist.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor; an anchor's size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On the matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to call mlx5dr_ste_set_hit_addr on the hw_ste
		 * (48B long) here, since it only modifies the first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->ste_arr[0].hw_ste,
					new_htbl->chunk->icm_addr,
					new_htbl->chunk->num_of_entries);

		ste_to_update = &prev_htbl->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     cur_htbl->pointing_ste->hw_ste,
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, ste_to_update->hw_ste, ste_info,
						  update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}

static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk_size)
		return NULL; /* Skip rehash, we are already at the maximum size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}

static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
					miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	kfree(ste_info);
	return NULL;
}

static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}

/* When an STE's pointer is no longer valid, e.g. after the STE was moved to
 * be the first in the miss list (back in the origin table), all rule members
 * that are attached to this STE must update their STE member to the new
 * pointer.
 */
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
				    struct mlx5dr_ste *new_ste)
{
	struct mlx5dr_rule_member *rule_mem;

	list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
		rule_mem->ste = new_ste;
}

static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_rule_member *rule_mem;
	struct mlx5dr_rule_member *tmp_mem;

	if (list_empty(&nic_rule->rule_members_list))
		return;

	list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
		list_del(&rule_mem->list);
		list_del(&rule_mem->use_ste_list);
		mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
		kvfree(rule_mem);
	}
}

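/* Count the set bits in byte_mask using Kernighan's method: each
 * "byte_mask & (byte_mask - 1)" step clears the lowest set bit, so the
 * loop iterates once per set bit, e.g. 0b1101 -> 0b1100 -> 0b1000 -> 0.
 */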
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}

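/* A table should grow only when all of the following hold: it has not
 * reached the domain's maximum ICM size, it is allowed to grow, the mask
 * has more bits than the current table size can spread entries over, and
 * both the collisions and the non-colliding valid entries have crossed
 * the increase threshold.
 */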
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
		return false;

	if (!ctrl->may_grow)
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
		return false;

	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
		return true;

	return false;
}

static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
			      struct mlx5dr_ste *ste)
{
	struct mlx5dr_rule_member *rule_mem;

	rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
	if (!rule_mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&rule_mem->list);
	INIT_LIST_HEAD(&rule_mem->use_ste_list);

	rule_mem->ste = ste;
	list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
	list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);

	return 0;
}

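/* Chain the extra action STEs (those beyond the matcher's builders), one
 * per new entry in hw_ste_arr: each action STE gets its own single-entry
 * table, the previous STE's hit address is pointed at it, and the write
 * is queued on send_ste_list.
 */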
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k, ret;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz: the actions fit
	 *    in the existing STEs.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz: new STEs were
	 *    added to support the actions.
	 */
	if (num_of_builders == new_hw_ste_arr_sz)
		return 0;

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		/* When freeing an STE we walk its miss list, so add this STE
		 * to its own list.
		 */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point the current STE to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);
		ret = dr_rule_add_member(nic_rule, action_ste);
		if (ret) {
			mlx5dr_dbg(dmn, "Failed adding rule member\n");
			goto free_ste_info;
		}
		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	return 0;

free_ste_info:
	kfree(ste_info_arr[k]);
err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}

static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take a reference on the table, only on the first time this STE is used */
	mlx5dr_htbl_get(cur_htbl);

	/* New entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}

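/* Resolve the STE for one chain location: hash into cur_htbl and either
 * take the empty slot, reuse a matching miss-list entry, rehash the table
 * if it needs to grow, or append a new collision entry to the miss list.
 */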
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this STE is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If this is the last STE in the chain and it has the
			 * same tag, then all the previous STEs are equal as
			 * well, which means this rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule_nic()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_htbl_put(cur_htbl);
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk_size);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}

static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);
	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}

static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}

static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}

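/* On FDB domains a rule is inserted on both RX and TX; skip the direction
 * that can never match, based on the matched source port and on the
 * flow_source hint given at rule creation.
 */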
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_ste_entry_type ste_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = ste_type == MLX5DR_STE_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != WIRE_PORT)
			return true;

		if (!rx && value->misc.source_port == WIRE_PORT)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}

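/* Build one NIC (RX or TX) instance of the rule: translate the match
 * values and actions into an array of hw STEs, walk the matcher's hash
 * tables building the STE chain, connect the action STEs, and flush all
 * pending writes to HW in reverse order.
 */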
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	INIT_LIST_HEAD(&nic_rule->rule_members_list);

	if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr)
		return -ENOMEM;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto free_hw_ste;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto free_hw_ste;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop covers only the builders, whose count is less than or
	 * equal to the number of STEs, since some actions may live in
	 * additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		/* Keep all STEs in the rule struct */
		ret = dr_rule_add_member(nic_rule, ste);
		if (ret) {
			mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
			goto free_ste;
		}

		mlx5dr_ste_get(ste);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed applying actions\n");
		goto free_rule;
	}

	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	mlx5dr_domain_nic_unlock(nic_dmn);

	kfree(hw_ste_arr);

	return 0;

free_ste:
	mlx5dr_ste_put(ste, matcher, nic_matcher);
free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}
free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);
	kfree(hw_ste_arr);
	return ret;
}

static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param since it will be consumed during the first
	 * nic_rule insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}

static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}

struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}

int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}