// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
/* don't try to optimize STE allocation if the stack is too constraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
#define DR_RULE_MAX_STES_OPTIMIZED 5
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)

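/* Append new_last_ste to the tail of the hash-collision miss list and point
 * the previous tail's miss address at it. The previous tail's control word
 * is queued on send_list so the change is later written to HW.
 */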
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
				       enum mlx5dr_domain_nic_type nic_type,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}

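/* If the miss address is not already set in hw_ste, point this last STE at
 * the matcher's end-anchor table.
 */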
static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher,
					   u8 *hw_ste)
{
	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
	u64 icm_addr;

	if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
		return;

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
}

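/* Allocate a single-entry hash table whose one STE will serve as a collision
 * entry hanging off another table's miss chain. The table never grows.
 */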
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}

static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In a collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}

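/* Write a single pending STE update to HW: refresh the driver's cached copy
 * of the STE, post the write, and free the ste_send_info that described it.
 */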
static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to ste, only reduced size or control, the last 16B (mask)
	 * is already written to the hw.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);

	mlx5dr_send_info_free(ste_info);
	return ret;
}

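/* Flush the pending ste_send_info entries on send_ste_list to HW, in reverse
 * or regular list order as requested by the caller. Stops on the first error.
 */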
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
			return ste;
	}

	return NULL;
}

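/* During rehash, a copied STE may collide with an entry already placed in
 * the new table. Create a collision entry for it and chain it onto the
 * existing entry's miss list, queueing the needed HW updates on update_list.
 */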
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In a collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous entry in the list */
	ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					  new_ste, mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to update duplicate entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link the old STE's rule to the new ste */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}

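/* Copy one used STE from the current hash table into new_htbl: rebuild its
 * bit mask, control and tag, recompute its hash index in the larger table,
 * and either take the free slot or fall back to a collision entry.
 */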
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = mlx5dr_send_info_alloc(dmn,
						  nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}

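/* Walk every used entry of cur_htbl and copy its whole miss list into
 * new_htbl, flushing the pending HW updates after each table row to bound
 * the number of ste_send_info allocations.
 */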
static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			goto clean_copy;

		/* In order to decrease the number of allocated ste_send_info
		 * structs, send the current table row now.
		 */
		err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
		if (err) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
			goto clean_copy;
		}
	}

clean_copy:
	return err;
}

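/* Grow a hash table: allocate a table of new_size, copy all used entries
 * into it, write it to HW, and then re-point the previous table (or the
 * matcher anchor) at the new table. Returns the new table, or NULL on
 * failure.
 */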
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the HW is done in regular order of rehash_table_send_list,
	 * so that the original data is written before the miss address of
	 * collision entries, if any exist.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor; an anchor's size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to operate mlx5dr_ste_set_hit_addr on the hw_ste
		 * here (48B long), since it works only on the first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		mlx5dr_send_info_free(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	mlx5dr_send_info_free(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}

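/* Try to grow cur_htbl to the next chunk size, capped at the domain's
 * maximum SW ICM size. Returns NULL if the table is already at the cap.
 */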
static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk->size)
		return NULL; /* Skip rehash, we are already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}

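/* Handle a hash collision while inserting a rule: create a collision entry
 * for hw_ste, append it to the bucket's miss list, and queue the writes of
 * both the new entry and the updated list tail on send_list.
 */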
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					new_ste, miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	mlx5dr_send_info_free(ste_info);
	return NULL;
}

static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

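/* Take a reference on each action and track it on the rule's action list,
 * so the actions cannot be destroyed while the rule still uses them.
 */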
static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}

void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Updating the rule member is usually done for the last STE or during
	 * rule creation to recover from mid-creation failure (for this purpose
	 * the force flag is used)
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}

static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *first_ste;

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				     struct mlx5dr_ste, miss_list_node);

	return first_ste->htbl->pointing_ste;
}

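/* Collect the rule's STEs into ste_arr by walking the chain backwards from
 * the last STE, following each table's pointing_ste, until the STE at chain
 * location 1 is reached.
 */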
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	bool first = false;

	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	/* Iterate from last to first */
	while (!first) {
		first = curr_ste->ste_chain_location == 1;
		ste_arr[*num_of_stes] = curr_ste;
		*num_of_stes += 1;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);
	}

	return 0;
}

static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}

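/* Count the set bits in byte_mask using Kernighan's method: each
 * byte_mask &= (byte_mask - 1) step clears the lowest set bit, so the
 * loop runs once per set bit.
 */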
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}

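/* Decide whether a hash table is worth rehashing into a bigger one: the
 * table must still be allowed to grow, the mask must provide more hash bits
 * than the current table size uses, and both the collision count and the
 * non-colliding valid entries must have crossed the growth threshold.
 */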
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}

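/* Attach the extra action STEs (entries in hw_ste_arr beyond the match
 * builders) behind the rule's last match STE, each in its own single-entry
 * table, and queue their writes on send_ste_list.
 */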
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the actions fit
	 *    in the existing stes.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz, new stes were
	 *    added to support the actions.
	 */

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* While freeing an ste we go over its miss list, so add this ste to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
							 nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point the current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}

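/* Take an unused hash table slot for this rule: start a new miss list with
 * the STE, create its next-hop table, and queue the STE write on send_list.
 */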
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	ste->ste_chain_location = ste_location;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	mlx5dr_send_info_free(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}

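/* Place one STE of the rule in cur_htbl: use the hashed slot if it is free,
 * reuse a matching entry from the miss list, grow the table once if it is
 * over threshold, or otherwise add a collision entry.
 */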
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is the last STE in the chain and has the same
			 * tag, it means all the previous STEs are the same;
			 * if so, this rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule_nic()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}

static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

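/* Validate the rule's match value against the matcher: the value size must
 * be sane, and for every criteria the matcher uses, the value must not set
 * bits outside the matcher's mask. The value is unpacked into param and
 * then checked per criteria.
 */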
static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	/* Check if this nic rule was actually created, or whether it was
	 * skipped and only the other type of the RX/TX nic rule was created.
	 */
	if (!nic_rule->last_rule_ste)
		return 0;

	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);

	nic_rule->nic_matcher->rules--;
	if (!nic_rule->nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
						   nic_rule->nic_matcher);

	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}

static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mlx5dr_dbg_rule_del(rule);

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}

static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}

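/* In an FDB domain each rule is inserted on both the RX and TX paths. Skip
 * the side that can never match, based on the source port in the match
 * value and on the rule's flow_source hint.
 */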
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
			return true;

		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}

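/* Create the RX or TX half of a rule: build the match and action STE array
 * (on the stack when it is small enough), walk it to insert each STE into
 * the matcher's tables, then flush all queued writes to HW.
 */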
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	bool hw_ste_arr_is_opt;
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto err_unlock;

	hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
	if (likely(hw_ste_arr_is_opt)) {
		hw_ste_arr = hw_ste_arr_optimized;
	} else {
		hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
				     DR_STE_SIZE, GFP_KERNEL);

		if (!hw_ste_arr) {
			ret = -ENOMEM;
			goto err_unlock;
		}
	}

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs and build a dr_ste for each. The loop
	 * covers only the builders, whose count is at most the number of
	 * STEs, since actions may live in additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to apply actions\n");
		goto free_rule;
	}
	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		mlx5dr_send_info_free(ste_info);
	}

remove_from_nic_tbl:
	if (!nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	if (!hw_ste_arr_is_opt)
		kfree(hw_ste_arr);

err_unlock:
	mlx5dr_domain_nic_unlock(nic_dmn);

	return ret;
}

static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param since it will be consumed during the first
	 * nic_rule insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}

static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	INIT_LIST_HEAD(&rule->dbg_node);
	mlx5dr_dbg_rule_add(rule);
	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}

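/* Public entry point for rule creation. The matcher's refcount is taken up
 * front and dropped again if rule creation fails.
 */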
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}

int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}