1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37 
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
43 #include "accel/ipsec.h"
44 #include "fpga/ipsec.h"
45 
46 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 					 sizeof(struct init_tree_node))
48 
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 		 ...) {.type = FS_TYPE_PRIO,\
51 	.min_ft_level = min_level_val,\
52 	.num_levels = num_levels_val,\
53 	.num_leaf_prios = num_prios_val,\
54 	.caps = caps_val,\
55 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
56 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57 }
58 
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
61 		 __VA_ARGS__)\
62 
63 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
64 	.def_miss_action = def_miss_act,\
65 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
66 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67 }
68 
69 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 				   sizeof(long))
71 
72 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73 
74 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 			       .caps = (long[]) {__VA_ARGS__} }
76 
77 #define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81 
82 #define FS_CHAINING_CAPS_EGRESS                                                \
83 	FS_REQUIRED_CAPS(                                                      \
84 		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
85 		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
86 		FS_CAP(flow_table_properties_nic_transmit                      \
87 			       .identified_miss_table_mode),                   \
88 		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89 
90 #define FS_CHAINING_CAPS_RDMA_TX                                                \
91 	FS_REQUIRED_CAPS(                                                       \
92 		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
94 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
95 			       .identified_miss_table_mode),                    \
96 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
97 			       .flow_table_modify))
98 
99 #define LEFTOVERS_NUM_LEVELS 1
100 #define LEFTOVERS_NUM_PRIOS 1
101 
102 #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
103 #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
104 
105 #define BY_PASS_PRIO_NUM_LEVELS 1
106 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
107 			   LEFTOVERS_NUM_PRIOS)
108 
109 #define ETHTOOL_PRIO_NUM_LEVELS 1
110 #define ETHTOOL_NUM_PRIOS 11
111 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
112 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
113 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
114 #define KERNEL_NIC_NUM_PRIOS 1
115 /* One more level for tc */
116 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
117 
118 #define KERNEL_NIC_TC_NUM_PRIOS  1
119 #define KERNEL_NIC_TC_NUM_LEVELS 2
120 
121 #define ANCHOR_NUM_LEVELS 1
122 #define ANCHOR_NUM_PRIOS 1
123 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
124 
125 #define OFFLOADS_MAX_FT 2
126 #define OFFLOADS_NUM_PRIOS 2
127 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
128 
129 #define LAG_PRIO_NUM_LEVELS 1
130 #define LAG_NUM_PRIOS 1
131 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
132 
133 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
134 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
135 #define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
136 
137 struct node_caps {
138 	size_t	arr_sz;
139 	long	*caps;
140 };
141 
142 static struct init_tree_node {
143 	enum fs_node_type	type;
144 	struct init_tree_node *children;
145 	int ar_size;
146 	struct node_caps caps;
147 	int min_ft_level;
148 	int num_leaf_prios;
149 	int prio;
150 	int num_levels;
151 	enum mlx5_flow_table_miss_action def_miss_action;
152 } root_fs = {
153 	.type = FS_TYPE_NAMESPACE,
154 	.ar_size = 7,
155 	  .children = (struct init_tree_node[]){
156 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
157 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
158 				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
159 						    BY_PASS_PRIO_NUM_LEVELS))),
160 		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
161 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
162 				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
163 						    LAG_PRIO_NUM_LEVELS))),
164 		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
165 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
166 				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
167 						    OFFLOADS_MAX_FT))),
168 		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
169 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
170 				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
171 						    ETHTOOL_PRIO_NUM_LEVELS))),
172 		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
173 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
174 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
175 						    KERNEL_NIC_TC_NUM_LEVELS),
176 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
177 						    KERNEL_NIC_PRIO_NUM_LEVELS))),
178 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
179 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
180 				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
181 						    LEFTOVERS_NUM_LEVELS))),
182 		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
183 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
184 				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
185 						    ANCHOR_NUM_LEVELS))),
186 	}
187 };
188 
189 static struct init_tree_node egress_root_fs = {
190 	.type = FS_TYPE_NAMESPACE,
191 #ifdef CONFIG_MLX5_IPSEC
192 	.ar_size = 2,
193 #else
194 	.ar_size = 1,
195 #endif
196 	.children = (struct init_tree_node[]) {
197 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
198 			 FS_CHAINING_CAPS_EGRESS,
199 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
200 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
201 						  BY_PASS_PRIO_NUM_LEVELS))),
202 #ifdef CONFIG_MLX5_IPSEC
203 		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
204 			 FS_CHAINING_CAPS_EGRESS,
205 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
206 				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
207 						  KERNEL_TX_IPSEC_NUM_LEVELS))),
208 #endif
209 	}
210 };
211 
212 enum {
213 	RDMA_RX_COUNTERS_PRIO,
214 	RDMA_RX_BYPASS_PRIO,
215 	RDMA_RX_KERNEL_PRIO,
216 };
217 
218 #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
219 #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
220 #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
221 
222 static struct init_tree_node rdma_rx_root_fs = {
223 	.type = FS_TYPE_NAMESPACE,
224 	.ar_size = 3,
225 	.children = (struct init_tree_node[]) {
226 		[RDMA_RX_COUNTERS_PRIO] =
227 		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
228 			 FS_CHAINING_CAPS,
229 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
230 				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
231 						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
232 		[RDMA_RX_BYPASS_PRIO] =
233 		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
234 			 FS_CHAINING_CAPS,
235 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
236 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
237 						  BY_PASS_PRIO_NUM_LEVELS))),
238 		[RDMA_RX_KERNEL_PRIO] =
239 		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
240 			 FS_CHAINING_CAPS,
241 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
242 				ADD_MULTIPLE_PRIO(1, 1))),
243 	}
244 };
245 
246 enum {
247 	RDMA_TX_COUNTERS_PRIO,
248 	RDMA_TX_BYPASS_PRIO,
249 };
250 
251 #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
252 #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
253 
254 static struct init_tree_node rdma_tx_root_fs = {
255 	.type = FS_TYPE_NAMESPACE,
256 	.ar_size = 2,
257 	.children = (struct init_tree_node[]) {
258 		[RDMA_TX_COUNTERS_PRIO] =
259 		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
260 			 FS_CHAINING_CAPS,
261 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
262 				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
263 						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
264 		[RDMA_TX_BYPASS_PRIO] =
265 		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
266 			 FS_CHAINING_CAPS_RDMA_TX,
267 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
268 				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
269 						  BY_PASS_PRIO_NUM_LEVELS))),
270 	}
271 };
272 
273 enum fs_i_lock_class {
274 	FS_LOCK_GRANDPARENT,
275 	FS_LOCK_PARENT,
276 	FS_LOCK_CHILD
277 };
278 
279 static const struct rhashtable_params rhash_fte = {
280 	.key_len = sizeof_field(struct fs_fte, val),
281 	.key_offset = offsetof(struct fs_fte, val),
282 	.head_offset = offsetof(struct fs_fte, hash),
283 	.automatic_shrinking = true,
284 	.min_size = 1,
285 };
286 
287 static const struct rhashtable_params rhash_fg = {
288 	.key_len = sizeof_field(struct mlx5_flow_group, mask),
289 	.key_offset = offsetof(struct mlx5_flow_group, mask),
290 	.head_offset = offsetof(struct mlx5_flow_group, hash),
291 	.automatic_shrinking = true,
292 	.min_size = 1,
293 
294 };
295 
296 static void del_hw_flow_table(struct fs_node *node);
297 static void del_hw_flow_group(struct fs_node *node);
298 static void del_hw_fte(struct fs_node *node);
299 static void del_sw_flow_table(struct fs_node *node);
300 static void del_sw_flow_group(struct fs_node *node);
301 static void del_sw_fte(struct fs_node *node);
302 static void del_sw_prio(struct fs_node *node);
303 static void del_sw_ns(struct fs_node *node);
304 /* Delete rule (destination) is special case that
305  * requires to lock the FTE for all the deletion process.
306  */
307 static void del_sw_hw_rule(struct fs_node *node);
308 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
309 				struct mlx5_flow_destination *d2);
310 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
311 static struct mlx5_flow_rule *
312 find_flow_rule(struct fs_fte *fte,
313 	       struct mlx5_flow_destination *dest);
314 
315 static void tree_init_node(struct fs_node *node,
316 			   void (*del_hw_func)(struct fs_node *),
317 			   void (*del_sw_func)(struct fs_node *))
318 {
319 	refcount_set(&node->refcount, 1);
320 	INIT_LIST_HEAD(&node->list);
321 	INIT_LIST_HEAD(&node->children);
322 	init_rwsem(&node->lock);
323 	node->del_hw_func = del_hw_func;
324 	node->del_sw_func = del_sw_func;
325 	node->active = false;
326 }
327 
328 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
329 {
330 	if (parent)
331 		refcount_inc(&parent->refcount);
332 	node->parent = parent;
333 
334 	/* Parent is the root */
335 	if (!parent)
336 		node->root = node;
337 	else
338 		node->root = parent->root;
339 }
340 
341 static int tree_get_node(struct fs_node *node)
342 {
343 	return refcount_inc_not_zero(&node->refcount);
344 }
345 
346 static void nested_down_read_ref_node(struct fs_node *node,
347 				      enum fs_i_lock_class class)
348 {
349 	if (node) {
350 		down_read_nested(&node->lock, class);
351 		refcount_inc(&node->refcount);
352 	}
353 }
354 
355 static void nested_down_write_ref_node(struct fs_node *node,
356 				       enum fs_i_lock_class class)
357 {
358 	if (node) {
359 		down_write_nested(&node->lock, class);
360 		refcount_inc(&node->refcount);
361 	}
362 }
363 
364 static void down_write_ref_node(struct fs_node *node, bool locked)
365 {
366 	if (node) {
367 		if (!locked)
368 			down_write(&node->lock);
369 		refcount_inc(&node->refcount);
370 	}
371 }
372 
373 static void up_read_ref_node(struct fs_node *node)
374 {
375 	refcount_dec(&node->refcount);
376 	up_read(&node->lock);
377 }
378 
379 static void up_write_ref_node(struct fs_node *node, bool locked)
380 {
381 	refcount_dec(&node->refcount);
382 	if (!locked)
383 		up_write(&node->lock);
384 }
385 
386 static void tree_put_node(struct fs_node *node, bool locked)
387 {
388 	struct fs_node *parent_node = node->parent;
389 
390 	if (refcount_dec_and_test(&node->refcount)) {
391 		if (node->del_hw_func)
392 			node->del_hw_func(node);
393 		if (parent_node) {
394 			down_write_ref_node(parent_node, locked);
395 			list_del_init(&node->list);
396 		}
397 		node->del_sw_func(node);
398 		if (parent_node)
399 			up_write_ref_node(parent_node, locked);
400 		node = NULL;
401 	}
402 	if (!node && parent_node)
403 		tree_put_node(parent_node, locked);
404 }
405 
406 static int tree_remove_node(struct fs_node *node, bool locked)
407 {
408 	if (refcount_read(&node->refcount) > 1) {
409 		refcount_dec(&node->refcount);
410 		return -EEXIST;
411 	}
412 	tree_put_node(node, locked);
413 	return 0;
414 }
415 
416 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
417 				 unsigned int prio)
418 {
419 	struct fs_prio *iter_prio;
420 
421 	fs_for_each_prio(iter_prio, ns) {
422 		if (iter_prio->prio == prio)
423 			return iter_prio;
424 	}
425 
426 	return NULL;
427 }
428 
429 static bool is_fwd_next_action(u32 action)
430 {
431 	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
432 			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
433 }
434 
435 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
436 {
437 	int i;
438 
439 	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
440 		if (spec->match_value[i] & ~spec->match_criteria[i]) {
441 			pr_warn("mlx5_core: match_value differs from match_criteria\n");
442 			return false;
443 		}
444 
445 	return true;
446 }
447 
448 struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
449 {
450 	struct fs_node *root;
451 	struct mlx5_flow_namespace *ns;
452 
453 	root = node->root;
454 
455 	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
456 		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
457 		return NULL;
458 	}
459 
460 	ns = container_of(root, struct mlx5_flow_namespace, node);
461 	return container_of(ns, struct mlx5_flow_root_namespace, ns);
462 }
463 
464 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
465 {
466 	struct mlx5_flow_root_namespace *root = find_root(node);
467 
468 	if (root)
469 		return root->dev->priv.steering;
470 	return NULL;
471 }
472 
473 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
474 {
475 	struct mlx5_flow_root_namespace *root = find_root(node);
476 
477 	if (root)
478 		return root->dev;
479 	return NULL;
480 }
481 
482 static void del_sw_ns(struct fs_node *node)
483 {
484 	kfree(node);
485 }
486 
487 static void del_sw_prio(struct fs_node *node)
488 {
489 	kfree(node);
490 }
491 
492 static void del_hw_flow_table(struct fs_node *node)
493 {
494 	struct mlx5_flow_root_namespace *root;
495 	struct mlx5_flow_table *ft;
496 	struct mlx5_core_dev *dev;
497 	int err;
498 
499 	fs_get_obj(ft, node);
500 	dev = get_dev(&ft->node);
501 	root = find_root(&ft->node);
502 	trace_mlx5_fs_del_ft(ft);
503 
504 	if (node->active) {
505 		err = root->cmds->destroy_flow_table(root, ft);
506 		if (err)
507 			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
508 	}
509 }
510 
511 static void del_sw_flow_table(struct fs_node *node)
512 {
513 	struct mlx5_flow_table *ft;
514 	struct fs_prio *prio;
515 
516 	fs_get_obj(ft, node);
517 
518 	rhltable_destroy(&ft->fgs_hash);
519 	if (ft->node.parent) {
520 		fs_get_obj(prio, ft->node.parent);
521 		prio->num_ft--;
522 	}
523 	kfree(ft);
524 }
525 
526 static void modify_fte(struct fs_fte *fte)
527 {
528 	struct mlx5_flow_root_namespace *root;
529 	struct mlx5_flow_table *ft;
530 	struct mlx5_flow_group *fg;
531 	struct mlx5_core_dev *dev;
532 	int err;
533 
534 	fs_get_obj(fg, fte->node.parent);
535 	fs_get_obj(ft, fg->node.parent);
536 	dev = get_dev(&fte->node);
537 
538 	root = find_root(&ft->node);
539 	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
540 	if (err)
541 		mlx5_core_warn(dev,
542 			       "%s can't del rule fg id=%d fte_index=%d\n",
543 			       __func__, fg->id, fte->index);
544 	fte->modify_mask = 0;
545 }
546 
547 static void del_sw_hw_rule(struct fs_node *node)
548 {
549 	struct mlx5_flow_rule *rule;
550 	struct fs_fte *fte;
551 
552 	fs_get_obj(rule, node);
553 	fs_get_obj(fte, rule->node.parent);
554 	trace_mlx5_fs_del_rule(rule);
555 	if (is_fwd_next_action(rule->sw_action)) {
556 		mutex_lock(&rule->dest_attr.ft->lock);
557 		list_del(&rule->next_ft);
558 		mutex_unlock(&rule->dest_attr.ft->lock);
559 	}
560 
561 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
562 	    --fte->dests_size) {
563 		fte->modify_mask |=
564 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
565 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
566 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
567 		goto out;
568 	}
569 
570 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
571 	    --fte->dests_size) {
572 		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
573 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
574 		goto out;
575 	}
576 
577 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
578 	    --fte->dests_size) {
579 		fte->modify_mask |=
580 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
581 	}
582 out:
583 	kfree(rule);
584 }
585 
586 static void del_hw_fte(struct fs_node *node)
587 {
588 	struct mlx5_flow_root_namespace *root;
589 	struct mlx5_flow_table *ft;
590 	struct mlx5_flow_group *fg;
591 	struct mlx5_core_dev *dev;
592 	struct fs_fte *fte;
593 	int err;
594 
595 	fs_get_obj(fte, node);
596 	fs_get_obj(fg, fte->node.parent);
597 	fs_get_obj(ft, fg->node.parent);
598 
599 	trace_mlx5_fs_del_fte(fte);
600 	dev = get_dev(&ft->node);
601 	root = find_root(&ft->node);
602 	if (node->active) {
603 		err = root->cmds->delete_fte(root, ft, fte);
604 		if (err)
605 			mlx5_core_warn(dev,
606 				       "flow steering can't delete fte in index %d of flow group id %d\n",
607 				       fte->index, fg->id);
608 		node->active = false;
609 	}
610 }
611 
612 static void del_sw_fte(struct fs_node *node)
613 {
614 	struct mlx5_flow_steering *steering = get_steering(node);
615 	struct mlx5_flow_group *fg;
616 	struct fs_fte *fte;
617 	int err;
618 
619 	fs_get_obj(fte, node);
620 	fs_get_obj(fg, fte->node.parent);
621 
622 	err = rhashtable_remove_fast(&fg->ftes_hash,
623 				     &fte->hash,
624 				     rhash_fte);
625 	WARN_ON(err);
626 	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
627 	kmem_cache_free(steering->ftes_cache, fte);
628 }
629 
630 static void del_hw_flow_group(struct fs_node *node)
631 {
632 	struct mlx5_flow_root_namespace *root;
633 	struct mlx5_flow_group *fg;
634 	struct mlx5_flow_table *ft;
635 	struct mlx5_core_dev *dev;
636 
637 	fs_get_obj(fg, node);
638 	fs_get_obj(ft, fg->node.parent);
639 	dev = get_dev(&ft->node);
640 	trace_mlx5_fs_del_fg(fg);
641 
642 	root = find_root(&ft->node);
643 	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
644 		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
645 			       fg->id, ft->id);
646 }
647 
648 static void del_sw_flow_group(struct fs_node *node)
649 {
650 	struct mlx5_flow_steering *steering = get_steering(node);
651 	struct mlx5_flow_group *fg;
652 	struct mlx5_flow_table *ft;
653 	int err;
654 
655 	fs_get_obj(fg, node);
656 	fs_get_obj(ft, fg->node.parent);
657 
658 	rhashtable_destroy(&fg->ftes_hash);
659 	ida_destroy(&fg->fte_allocator);
660 	if (ft->autogroup.active &&
661 	    fg->max_ftes == ft->autogroup.group_size &&
662 	    fg->start_index < ft->autogroup.max_fte)
663 		ft->autogroup.num_groups--;
664 	err = rhltable_remove(&ft->fgs_hash,
665 			      &fg->hash,
666 			      rhash_fg);
667 	WARN_ON(err);
668 	kmem_cache_free(steering->fgs_cache, fg);
669 }
670 
671 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
672 {
673 	int index;
674 	int ret;
675 
676 	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
677 	if (index < 0)
678 		return index;
679 
680 	fte->index = index + fg->start_index;
681 	ret = rhashtable_insert_fast(&fg->ftes_hash,
682 				     &fte->hash,
683 				     rhash_fte);
684 	if (ret)
685 		goto err_ida_remove;
686 
687 	tree_add_node(&fte->node, &fg->node);
688 	list_add_tail(&fte->node.list, &fg->node.children);
689 	return 0;
690 
691 err_ida_remove:
692 	ida_free(&fg->fte_allocator, index);
693 	return ret;
694 }
695 
696 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
697 				const struct mlx5_flow_spec *spec,
698 				struct mlx5_flow_act *flow_act)
699 {
700 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
701 	struct fs_fte *fte;
702 
703 	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
704 	if (!fte)
705 		return ERR_PTR(-ENOMEM);
706 
707 	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
708 	fte->node.type =  FS_TYPE_FLOW_ENTRY;
709 	fte->action = *flow_act;
710 	fte->flow_context = spec->flow_context;
711 
712 	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
713 
714 	return fte;
715 }
716 
717 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
718 			       struct mlx5_flow_group *fg)
719 {
720 	rhashtable_destroy(&fg->ftes_hash);
721 	kmem_cache_free(steering->fgs_cache, fg);
722 }
723 
724 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
725 						u8 match_criteria_enable,
726 						const void *match_criteria,
727 						int start_index,
728 						int end_index)
729 {
730 	struct mlx5_flow_group *fg;
731 	int ret;
732 
733 	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
734 	if (!fg)
735 		return ERR_PTR(-ENOMEM);
736 
737 	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
738 	if (ret) {
739 		kmem_cache_free(steering->fgs_cache, fg);
740 		return ERR_PTR(ret);
741 	}
742 
743 	ida_init(&fg->fte_allocator);
744 	fg->mask.match_criteria_enable = match_criteria_enable;
745 	memcpy(&fg->mask.match_criteria, match_criteria,
746 	       sizeof(fg->mask.match_criteria));
747 	fg->node.type =  FS_TYPE_FLOW_GROUP;
748 	fg->start_index = start_index;
749 	fg->max_ftes = end_index - start_index + 1;
750 
751 	return fg;
752 }
753 
754 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
755 						       u8 match_criteria_enable,
756 						       const void *match_criteria,
757 						       int start_index,
758 						       int end_index,
759 						       struct list_head *prev)
760 {
761 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
762 	struct mlx5_flow_group *fg;
763 	int ret;
764 
765 	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
766 			      start_index, end_index);
767 	if (IS_ERR(fg))
768 		return fg;
769 
770 	/* initialize refcnt, add to parent list */
771 	ret = rhltable_insert(&ft->fgs_hash,
772 			      &fg->hash,
773 			      rhash_fg);
774 	if (ret) {
775 		dealloc_flow_group(steering, fg);
776 		return ERR_PTR(ret);
777 	}
778 
779 	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
780 	tree_add_node(&fg->node, &ft->node);
781 	/* Add node to group list */
782 	list_add(&fg->node.list, prev);
783 	atomic_inc(&ft->node.version);
784 
785 	return fg;
786 }
787 
788 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
789 						enum fs_flow_table_type table_type,
790 						enum fs_flow_table_op_mod op_mod,
791 						u32 flags)
792 {
793 	struct mlx5_flow_table *ft;
794 	int ret;
795 
796 	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
797 	if (!ft)
798 		return ERR_PTR(-ENOMEM);
799 
800 	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
801 	if (ret) {
802 		kfree(ft);
803 		return ERR_PTR(ret);
804 	}
805 
806 	ft->level = level;
807 	ft->node.type = FS_TYPE_FLOW_TABLE;
808 	ft->op_mod = op_mod;
809 	ft->type = table_type;
810 	ft->vport = vport;
811 	ft->flags = flags;
812 	INIT_LIST_HEAD(&ft->fwd_rules);
813 	mutex_init(&ft->lock);
814 
815 	return ft;
816 }
817 
818 /* If reverse is false, then we search for the first flow table in the
819  * root sub-tree from start(closest from right), else we search for the
820  * last flow table in the root sub-tree till start(closest from left).
821  */
822 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
823 							 struct list_head *start,
824 							 bool reverse)
825 {
826 #define list_advance_entry(pos, reverse)		\
827 	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
828 
829 #define list_for_each_advance_continue(pos, head, reverse)	\
830 	for (pos = list_advance_entry(pos, reverse);		\
831 	     &pos->list != (head);				\
832 	     pos = list_advance_entry(pos, reverse))
833 
834 	struct fs_node *iter = list_entry(start, struct fs_node, list);
835 	struct mlx5_flow_table *ft = NULL;
836 
837 	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
838 		return NULL;
839 
840 	list_for_each_advance_continue(iter, &root->children, reverse) {
841 		if (iter->type == FS_TYPE_FLOW_TABLE) {
842 			fs_get_obj(ft, iter);
843 			return ft;
844 		}
845 		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
846 		if (ft)
847 			return ft;
848 	}
849 
850 	return ft;
851 }
852 
853 /* If reverse is false then return the first flow table in next priority of
854  * prio in the tree, else return the last flow table in the previous priority
855  * of prio in the tree.
856  */
857 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
858 {
859 	struct mlx5_flow_table *ft = NULL;
860 	struct fs_node *curr_node;
861 	struct fs_node *parent;
862 
863 	parent = prio->node.parent;
864 	curr_node = &prio->node;
865 	while (!ft && parent) {
866 		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
867 		curr_node = parent;
868 		parent = curr_node->parent;
869 	}
870 	return ft;
871 }
872 
873 /* Assuming all the tree is locked by mutex chain lock */
874 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
875 {
876 	return find_closest_ft(prio, false);
877 }
878 
879 /* Assuming all the tree is locked by mutex chain lock */
880 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
881 {
882 	return find_closest_ft(prio, true);
883 }
884 
885 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
886 						struct mlx5_flow_act *flow_act)
887 {
888 	struct fs_prio *prio;
889 	bool next_ns;
890 
891 	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
892 	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
893 
894 	return find_next_chained_ft(prio);
895 }
896 
897 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
898 			       struct fs_prio *prio,
899 			       struct mlx5_flow_table *ft)
900 {
901 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
902 	struct mlx5_flow_table *iter;
903 	int err;
904 
905 	fs_for_each_ft(iter, prio) {
906 		err = root->cmds->modify_flow_table(root, iter, ft);
907 		if (err) {
908 			mlx5_core_err(dev,
909 				      "Failed to modify flow table id %d, type %d, err %d\n",
910 				      iter->id, iter->type, err);
911 			/* The driver is out of sync with the FW */
912 			return err;
913 		}
914 	}
915 	return 0;
916 }
917 
918 /* Connect flow tables from previous priority of prio to ft */
919 static int connect_prev_fts(struct mlx5_core_dev *dev,
920 			    struct mlx5_flow_table *ft,
921 			    struct fs_prio *prio)
922 {
923 	struct mlx5_flow_table *prev_ft;
924 
925 	prev_ft = find_prev_chained_ft(prio);
926 	if (prev_ft) {
927 		struct fs_prio *prev_prio;
928 
929 		fs_get_obj(prev_prio, prev_ft->node.parent);
930 		return connect_fts_in_prio(dev, prev_prio, ft);
931 	}
932 	return 0;
933 }
934 
935 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
936 				 *prio)
937 {
938 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
939 	struct mlx5_ft_underlay_qp *uqp;
940 	int min_level = INT_MAX;
941 	int err = 0;
942 	u32 qpn;
943 
944 	if (root->root_ft)
945 		min_level = root->root_ft->level;
946 
947 	if (ft->level >= min_level)
948 		return 0;
949 
950 	if (list_empty(&root->underlay_qpns)) {
951 		/* Don't set any QPN (zero) in case QPN list is empty */
952 		qpn = 0;
953 		err = root->cmds->update_root_ft(root, ft, qpn, false);
954 	} else {
955 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
956 			qpn = uqp->qpn;
957 			err = root->cmds->update_root_ft(root, ft,
958 							 qpn, false);
959 			if (err)
960 				break;
961 		}
962 	}
963 
964 	if (err)
965 		mlx5_core_warn(root->dev,
966 			       "Update root flow table of id(%u) qpn(%d) failed\n",
967 			       ft->id, qpn);
968 	else
969 		root->root_ft = ft;
970 
971 	return err;
972 }
973 
974 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
975 					 struct mlx5_flow_destination *dest)
976 {
977 	struct mlx5_flow_root_namespace *root;
978 	struct mlx5_flow_table *ft;
979 	struct mlx5_flow_group *fg;
980 	struct fs_fte *fte;
981 	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
982 	int err = 0;
983 
984 	fs_get_obj(fte, rule->node.parent);
985 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
986 		return -EINVAL;
987 	down_write_ref_node(&fte->node, false);
988 	fs_get_obj(fg, fte->node.parent);
989 	fs_get_obj(ft, fg->node.parent);
990 
991 	memcpy(&rule->dest_attr, dest, sizeof(*dest));
992 	root = find_root(&ft->node);
993 	err = root->cmds->update_fte(root, ft, fg,
994 				     modify_mask, fte);
995 	up_write_ref_node(&fte->node, false);
996 
997 	return err;
998 }
999 
1000 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1001 				 struct mlx5_flow_destination *new_dest,
1002 				 struct mlx5_flow_destination *old_dest)
1003 {
1004 	int i;
1005 
1006 	if (!old_dest) {
1007 		if (handle->num_rules != 1)
1008 			return -EINVAL;
1009 		return _mlx5_modify_rule_destination(handle->rule[0],
1010 						     new_dest);
1011 	}
1012 
1013 	for (i = 0; i < handle->num_rules; i++) {
1014 		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
1015 			return _mlx5_modify_rule_destination(handle->rule[i],
1016 							     new_dest);
1017 	}
1018 
1019 	return -EINVAL;
1020 }
1021 
1022 /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft  */
1023 static int connect_fwd_rules(struct mlx5_core_dev *dev,
1024 			     struct mlx5_flow_table *new_next_ft,
1025 			     struct mlx5_flow_table *old_next_ft)
1026 {
1027 	struct mlx5_flow_destination dest = {};
1028 	struct mlx5_flow_rule *iter;
1029 	int err = 0;
1030 
1031 	/* new_next_ft and old_next_ft could be NULL only
1032 	 * when we create/destroy the anchor flow table.
1033 	 */
1034 	if (!new_next_ft || !old_next_ft)
1035 		return 0;
1036 
1037 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1038 	dest.ft = new_next_ft;
1039 
1040 	mutex_lock(&old_next_ft->lock);
1041 	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1042 	mutex_unlock(&old_next_ft->lock);
1043 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1044 		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1045 		    iter->ft->ns == new_next_ft->ns)
1046 			continue;
1047 
1048 		err = _mlx5_modify_rule_destination(iter, &dest);
1049 		if (err)
1050 			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1051 			       new_next_ft->id);
1052 	}
1053 	return 0;
1054 }
1055 
1056 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1057 			      struct fs_prio *prio)
1058 {
1059 	struct mlx5_flow_table *next_ft, *first_ft;
1060 	int err = 0;
1061 
1062 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1063 
1064 	first_ft = list_first_entry_or_null(&prio->node.children,
1065 					    struct mlx5_flow_table, node.list);
1066 	if (!first_ft || first_ft->level > ft->level) {
1067 		err = connect_prev_fts(dev, ft, prio);
1068 		if (err)
1069 			return err;
1070 
1071 		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
1072 		err = connect_fwd_rules(dev, ft, next_ft);
1073 		if (err)
1074 			return err;
1075 	}
1076 
1077 	if (MLX5_CAP_FLOWTABLE(dev,
1078 			       flow_table_properties_nic_receive.modify_root))
1079 		err = update_root_ft_create(ft, prio);
1080 	return err;
1081 }
1082 
1083 static void list_add_flow_table(struct mlx5_flow_table *ft,
1084 				struct fs_prio *prio)
1085 {
1086 	struct list_head *prev = &prio->node.children;
1087 	struct mlx5_flow_table *iter;
1088 
1089 	fs_for_each_ft(iter, prio) {
1090 		if (iter->level > ft->level)
1091 			break;
1092 		prev = &iter->node.list;
1093 	}
1094 	list_add(&ft->node.list, prev);
1095 }
1096 
1097 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1098 							struct mlx5_flow_table_attr *ft_attr,
1099 							enum fs_flow_table_op_mod op_mod,
1100 							u16 vport)
1101 {
1102 	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1103 	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1104 	struct mlx5_flow_table *next_ft;
1105 	struct fs_prio *fs_prio = NULL;
1106 	struct mlx5_flow_table *ft;
1107 	int err;
1108 
1109 	if (!root) {
1110 		pr_err("mlx5: flow steering failed to find root of namespace\n");
1111 		return ERR_PTR(-ENODEV);
1112 	}
1113 
1114 	mutex_lock(&root->chain_lock);
1115 	fs_prio = find_prio(ns, ft_attr->prio);
1116 	if (!fs_prio) {
1117 		err = -EINVAL;
1118 		goto unlock_root;
1119 	}
1120 	if (!unmanaged) {
1121 		/* The level is related to the
1122 		 * priority level range.
1123 		 */
1124 		if (ft_attr->level >= fs_prio->num_levels) {
1125 			err = -ENOSPC;
1126 			goto unlock_root;
1127 		}
1128 
1129 		ft_attr->level += fs_prio->start_level;
1130 	}
1131 
1132 	/* The level is related to the
1133 	 * priority level range.
1134 	 */
1135 	ft = alloc_flow_table(ft_attr->level,
1136 			      vport,
1137 			      root->table_type,
1138 			      op_mod, ft_attr->flags);
1139 	if (IS_ERR(ft)) {
1140 		err = PTR_ERR(ft);
1141 		goto unlock_root;
1142 	}
1143 
1144 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1145 	next_ft = unmanaged ? ft_attr->next_ft :
1146 			      find_next_chained_ft(fs_prio);
1147 	ft->def_miss_action = ns->def_miss_action;
1148 	ft->ns = ns;
1149 	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
1150 	if (err)
1151 		goto free_ft;
1152 
1153 	if (!unmanaged) {
1154 		err = connect_flow_table(root->dev, ft, fs_prio);
1155 		if (err)
1156 			goto destroy_ft;
1157 	}
1158 
1159 	ft->node.active = true;
1160 	down_write_ref_node(&fs_prio->node, false);
1161 	if (!unmanaged) {
1162 		tree_add_node(&ft->node, &fs_prio->node);
1163 		list_add_flow_table(ft, fs_prio);
1164 	} else {
1165 		ft->node.root = fs_prio->node.root;
1166 	}
1167 	fs_prio->num_ft++;
1168 	up_write_ref_node(&fs_prio->node, false);
1169 	mutex_unlock(&root->chain_lock);
1170 	trace_mlx5_fs_add_ft(ft);
1171 	return ft;
1172 destroy_ft:
1173 	root->cmds->destroy_flow_table(root, ft);
1174 free_ft:
1175 	rhltable_destroy(&ft->fgs_hash);
1176 	kfree(ft);
1177 unlock_root:
1178 	mutex_unlock(&root->chain_lock);
1179 	return ERR_PTR(err);
1180 }
1181 
1182 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1183 					       struct mlx5_flow_table_attr *ft_attr)
1184 {
1185 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1186 }
1187 EXPORT_SYMBOL(mlx5_create_flow_table);
1188 
1189 struct mlx5_flow_table *
1190 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1191 			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
1192 {
1193 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1194 }
1195 
1196 struct mlx5_flow_table*
1197 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1198 				 int prio, u32 level)
1199 {
1200 	struct mlx5_flow_table_attr ft_attr = {};
1201 
1202 	ft_attr.level = level;
1203 	ft_attr.prio  = prio;
1204 	ft_attr.max_fte = 1;
1205 
1206 	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1207 }
1208 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1209 
1210 #define MAX_FLOW_GROUP_SIZE BIT(24)
1211 struct mlx5_flow_table*
1212 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1213 				    struct mlx5_flow_table_attr *ft_attr)
1214 {
1215 	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1216 	int max_num_groups = ft_attr->autogroup.max_num_groups;
1217 	struct mlx5_flow_table *ft;
1218 	int autogroups_max_fte;
1219 
1220 	ft = mlx5_create_flow_table(ns, ft_attr);
1221 	if (IS_ERR(ft))
1222 		return ft;
1223 
1224 	autogroups_max_fte = ft->max_fte - num_reserved_entries;
1225 	if (max_num_groups > autogroups_max_fte)
1226 		goto err_validate;
1227 	if (num_reserved_entries > ft->max_fte)
1228 		goto err_validate;
1229 
1230 	/* Align the number of groups according to the largest group size */
1231 	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1232 		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1233 
1234 	ft->autogroup.active = true;
1235 	ft->autogroup.required_groups = max_num_groups;
1236 	ft->autogroup.max_fte = autogroups_max_fte;
1237 	/* We save place for flow groups in addition to max types */
1238 	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1239 
1240 	return ft;
1241 
1242 err_validate:
1243 	mlx5_destroy_flow_table(ft);
1244 	return ERR_PTR(-ENOSPC);
1245 }
1246 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1247 
1248 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1249 					       u32 *fg_in)
1250 {
1251 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1252 	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1253 					    fg_in, match_criteria);
1254 	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1255 					    fg_in,
1256 					    match_criteria_enable);
1257 	int start_index = MLX5_GET(create_flow_group_in, fg_in,
1258 				   start_flow_index);
1259 	int end_index = MLX5_GET(create_flow_group_in, fg_in,
1260 				 end_flow_index);
1261 	struct mlx5_flow_group *fg;
1262 	int err;
1263 
1264 	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1265 		return ERR_PTR(-EPERM);
1266 
1267 	down_write_ref_node(&ft->node, false);
1268 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1269 				     start_index, end_index,
1270 				     ft->node.children.prev);
1271 	up_write_ref_node(&ft->node, false);
1272 	if (IS_ERR(fg))
1273 		return fg;
1274 
1275 	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1276 	if (err) {
1277 		tree_put_node(&fg->node, false);
1278 		return ERR_PTR(err);
1279 	}
1280 	trace_mlx5_fs_add_fg(fg);
1281 	fg->node.active = true;
1282 
1283 	return fg;
1284 }
1285 EXPORT_SYMBOL(mlx5_create_flow_group);
1286 
1287 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1288 {
1289 	struct mlx5_flow_rule *rule;
1290 
1291 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1292 	if (!rule)
1293 		return NULL;
1294 
1295 	INIT_LIST_HEAD(&rule->next_ft);
1296 	rule->node.type = FS_TYPE_FLOW_DEST;
1297 	if (dest)
1298 		memcpy(&rule->dest_attr, dest, sizeof(*dest));
1299 
1300 	return rule;
1301 }
1302 
1303 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1304 {
1305 	struct mlx5_flow_handle *handle;
1306 
1307 	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1308 	if (!handle)
1309 		return NULL;
1310 
1311 	handle->num_rules = num_rules;
1312 
1313 	return handle;
1314 }
1315 
1316 static void destroy_flow_handle(struct fs_fte *fte,
1317 				struct mlx5_flow_handle *handle,
1318 				struct mlx5_flow_destination *dest,
1319 				int i)
1320 {
1321 	for (; --i >= 0;) {
1322 		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1323 			fte->dests_size--;
1324 			list_del(&handle->rule[i]->node.list);
1325 			kfree(handle->rule[i]);
1326 		}
1327 	}
1328 	kfree(handle);
1329 }
1330 
1331 static struct mlx5_flow_handle *
1332 create_flow_handle(struct fs_fte *fte,
1333 		   struct mlx5_flow_destination *dest,
1334 		   int dest_num,
1335 		   int *modify_mask,
1336 		   bool *new_rule)
1337 {
1338 	struct mlx5_flow_handle *handle;
1339 	struct mlx5_flow_rule *rule = NULL;
1340 	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1341 	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1342 	int type;
1343 	int i = 0;
1344 
1345 	handle = alloc_handle((dest_num) ? dest_num : 1);
1346 	if (!handle)
1347 		return ERR_PTR(-ENOMEM);
1348 
1349 	do {
1350 		if (dest) {
1351 			rule = find_flow_rule(fte, dest + i);
1352 			if (rule) {
1353 				refcount_inc(&rule->node.refcount);
1354 				goto rule_found;
1355 			}
1356 		}
1357 
1358 		*new_rule = true;
1359 		rule = alloc_rule(dest + i);
1360 		if (!rule)
1361 			goto free_rules;
1362 
1363 		/* Add dest to dests list- we need flow tables to be in the
1364 		 * end of the list for forward to next prio rules.
1365 		 */
1366 		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1367 		if (dest &&
1368 		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1369 			list_add(&rule->node.list, &fte->node.children);
1370 		else
1371 			list_add_tail(&rule->node.list, &fte->node.children);
1372 		if (dest) {
1373 			fte->dests_size++;
1374 
1375 			type = dest[i].type ==
1376 				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1377 			*modify_mask |= type ? count : dst;
1378 		}
1379 rule_found:
1380 		handle->rule[i] = rule;
1381 	} while (++i < dest_num);
1382 
1383 	return handle;
1384 
1385 free_rules:
1386 	destroy_flow_handle(fte, handle, dest, i);
1387 	return ERR_PTR(-ENOMEM);
1388 }
1389 
1390 /* fte should not be deleted while calling this function */
1391 static struct mlx5_flow_handle *
1392 add_rule_fte(struct fs_fte *fte,
1393 	     struct mlx5_flow_group *fg,
1394 	     struct mlx5_flow_destination *dest,
1395 	     int dest_num,
1396 	     bool update_action)
1397 {
1398 	struct mlx5_flow_root_namespace *root;
1399 	struct mlx5_flow_handle *handle;
1400 	struct mlx5_flow_table *ft;
1401 	int modify_mask = 0;
1402 	int err;
1403 	bool new_rule = false;
1404 
1405 	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1406 				    &new_rule);
1407 	if (IS_ERR(handle) || !new_rule)
1408 		goto out;
1409 
1410 	if (update_action)
1411 		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1412 
1413 	fs_get_obj(ft, fg->node.parent);
1414 	root = find_root(&fg->node);
1415 	if (!(fte->status & FS_FTE_STATUS_EXISTING))
1416 		err = root->cmds->create_fte(root, ft, fg, fte);
1417 	else
1418 		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1419 	if (err)
1420 		goto free_handle;
1421 
1422 	fte->node.active = true;
1423 	fte->status |= FS_FTE_STATUS_EXISTING;
1424 	atomic_inc(&fg->node.version);
1425 
1426 out:
1427 	return handle;
1428 
1429 free_handle:
1430 	destroy_flow_handle(fte, handle, dest, handle->num_rules);
1431 	return ERR_PTR(err);
1432 }
1433 
1434 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
1435 						     const struct mlx5_flow_spec *spec)
1436 {
1437 	struct list_head *prev = &ft->node.children;
1438 	u32 max_fte = ft->autogroup.max_fte;
1439 	unsigned int candidate_index = 0;
1440 	unsigned int group_size = 0;
1441 	struct mlx5_flow_group *fg;
1442 
1443 	if (!ft->autogroup.active)
1444 		return ERR_PTR(-ENOENT);
1445 
1446 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1447 		group_size = ft->autogroup.group_size;
1448 
1449 	/*  max_fte == ft->autogroup.max_types */
1450 	if (group_size == 0)
1451 		group_size = 1;
1452 
1453 	/* sorted by start_index */
1454 	fs_for_each_fg(fg, ft) {
1455 		if (candidate_index + group_size > fg->start_index)
1456 			candidate_index = fg->start_index + fg->max_ftes;
1457 		else
1458 			break;
1459 		prev = &fg->node.list;
1460 	}
1461 
1462 	if (candidate_index + group_size > max_fte)
1463 		return ERR_PTR(-ENOSPC);
1464 
1465 	fg = alloc_insert_flow_group(ft,
1466 				     spec->match_criteria_enable,
1467 				     spec->match_criteria,
1468 				     candidate_index,
1469 				     candidate_index + group_size - 1,
1470 				     prev);
1471 	if (IS_ERR(fg))
1472 		goto out;
1473 
1474 	if (group_size == ft->autogroup.group_size)
1475 		ft->autogroup.num_groups++;
1476 
1477 out:
1478 	return fg;
1479 }
1480 
1481 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1482 				  struct mlx5_flow_group *fg)
1483 {
1484 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1485 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1486 	void *match_criteria_addr;
1487 	u8 src_esw_owner_mask_on;
1488 	void *misc;
1489 	int err;
1490 	u32 *in;
1491 
1492 	in = kvzalloc(inlen, GFP_KERNEL);
1493 	if (!in)
1494 		return -ENOMEM;
1495 
1496 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1497 		 fg->mask.match_criteria_enable);
1498 	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1499 	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
1500 		 fg->max_ftes - 1);
1501 
1502 	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1503 			    misc_parameters);
1504 	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1505 					 source_eswitch_owner_vhca_id);
1506 	MLX5_SET(create_flow_group_in, in,
1507 		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1508 
1509 	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1510 					   in, match_criteria);
1511 	memcpy(match_criteria_addr, fg->mask.match_criteria,
1512 	       sizeof(fg->mask.match_criteria));
1513 
1514 	err = root->cmds->create_flow_group(root, ft, in, fg);
1515 	if (!err) {
1516 		fg->node.active = true;
1517 		trace_mlx5_fs_add_fg(fg);
1518 	}
1519 
1520 	kvfree(in);
1521 	return err;
1522 }
1523 
1524 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1525 				struct mlx5_flow_destination *d2)
1526 {
1527 	if (d1->type == d2->type) {
1528 		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1529 		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1530 		     d1->vport.num == d2->vport.num &&
1531 		     d1->vport.flags == d2->vport.flags &&
1532 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1533 		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1534 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1535 		      (d1->vport.pkt_reformat->id ==
1536 		       d2->vport.pkt_reformat->id) : true)) ||
1537 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1538 		     d1->ft == d2->ft) ||
1539 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1540 		     d1->tir_num == d2->tir_num) ||
1541 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1542 		     d1->ft_num == d2->ft_num) ||
1543 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1544 		     d1->sampler_id == d2->sampler_id))
1545 			return true;
1546 	}
1547 
1548 	return false;
1549 }
1550 
1551 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1552 					     struct mlx5_flow_destination *dest)
1553 {
1554 	struct mlx5_flow_rule *rule;
1555 
1556 	list_for_each_entry(rule, &fte->node.children, node.list) {
1557 		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1558 			return rule;
1559 	}
1560 	return NULL;
1561 }
1562 
1563 static bool check_conflicting_actions(u32 action1, u32 action2)
1564 {
1565 	u32 xored_actions = action1 ^ action2;
1566 
1567 	/* if one rule only wants to count, it's ok */
1568 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1569 	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1570 		return false;
1571 
1572 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
1573 			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1574 			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
1575 			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
1576 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1577 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1578 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1579 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1580 		return true;
1581 
1582 	return false;
1583 }
1584 
1585 static int check_conflicting_ftes(struct fs_fte *fte,
1586 				  const struct mlx5_flow_context *flow_context,
1587 				  const struct mlx5_flow_act *flow_act)
1588 {
1589 	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1590 		mlx5_core_warn(get_dev(&fte->node),
1591 			       "Found two FTEs with conflicting actions\n");
1592 		return -EEXIST;
1593 	}
1594 
1595 	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1596 	    fte->flow_context.flow_tag != flow_context->flow_tag) {
1597 		mlx5_core_warn(get_dev(&fte->node),
1598 			       "FTE flow tag %u already exists with different flow tag %u\n",
1599 			       fte->flow_context.flow_tag,
1600 			       flow_context->flow_tag);
1601 		return -EEXIST;
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1608 					    const struct mlx5_flow_spec *spec,
1609 					    struct mlx5_flow_act *flow_act,
1610 					    struct mlx5_flow_destination *dest,
1611 					    int dest_num,
1612 					    struct fs_fte *fte)
1613 {
1614 	struct mlx5_flow_handle *handle;
1615 	int old_action;
1616 	int i;
1617 	int ret;
1618 
1619 	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1620 	if (ret)
1621 		return ERR_PTR(ret);
1622 
1623 	old_action = fte->action.action;
1624 	fte->action.action |= flow_act->action;
1625 	handle = add_rule_fte(fte, fg, dest, dest_num,
1626 			      old_action != flow_act->action);
1627 	if (IS_ERR(handle)) {
1628 		fte->action.action = old_action;
1629 		return handle;
1630 	}
1631 	trace_mlx5_fs_set_fte(fte, false);
1632 
1633 	for (i = 0; i < handle->num_rules; i++) {
1634 		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1635 			tree_add_node(&handle->rule[i]->node, &fte->node);
1636 			trace_mlx5_fs_add_rule(handle->rule[i]);
1637 		}
1638 	}
1639 	return handle;
1640 }
1641 
1642 static bool counter_is_valid(u32 action)
1643 {
1644 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1645 			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1646 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1647 }
1648 
1649 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1650 			  struct mlx5_flow_act *flow_act,
1651 			  struct mlx5_flow_table *ft)
1652 {
1653 	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1654 	u32 action = flow_act->action;
1655 
1656 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1657 		return counter_is_valid(action);
1658 
1659 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1660 		return true;
1661 
1662 	if (ignore_level) {
1663 		if (ft->type != FS_FT_FDB &&
1664 		    ft->type != FS_FT_NIC_RX)
1665 			return false;
1666 
1667 		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1668 		    ft->type != dest->ft->type)
1669 			return false;
1670 	}
1671 
1672 	if (!dest || ((dest->type ==
1673 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1674 	    (dest->ft->level <= ft->level && !ignore_level)))
1675 		return false;
1676 	return true;
1677 }
1678 
1679 struct match_list {
1680 	struct list_head	list;
1681 	struct mlx5_flow_group *g;
1682 };
1683 
1684 static void free_match_list(struct match_list *head, bool ft_locked)
1685 {
1686 	struct match_list *iter, *match_tmp;
1687 
1688 	list_for_each_entry_safe(iter, match_tmp, &head->list,
1689 				 list) {
1690 		tree_put_node(&iter->g->node, ft_locked);
1691 		list_del(&iter->list);
1692 		kfree(iter);
1693 	}
1694 }
1695 
1696 static int build_match_list(struct match_list *match_head,
1697 			    struct mlx5_flow_table *ft,
1698 			    const struct mlx5_flow_spec *spec,
1699 			    struct mlx5_flow_group *fg,
1700 			    bool ft_locked)
1701 {
1702 	struct rhlist_head *tmp, *list;
1703 	struct mlx5_flow_group *g;
1704 	int err = 0;
1705 
1706 	rcu_read_lock();
1707 	INIT_LIST_HEAD(&match_head->list);
1708 	/* Collect all fgs which has a matching match_criteria */
1709 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1710 	/* RCU is atomic, we can't execute FW commands here */
1711 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1712 		struct match_list *curr_match;
1713 
1714 		if (fg && fg != g)
1715 			continue;
1716 
1717 		if (unlikely(!tree_get_node(&g->node)))
1718 			continue;
1719 
1720 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1721 		if (!curr_match) {
1722 			rcu_read_unlock();
1723 			free_match_list(match_head, ft_locked);
1724 			return -ENOMEM;
1725 		}
1726 		curr_match->g = g;
1727 		list_add_tail(&curr_match->list, &match_head->list);
1728 	}
1729 	rcu_read_unlock();
1730 	return err;
1731 }
1732 
1733 static u64 matched_fgs_get_version(struct list_head *match_head)
1734 {
1735 	struct match_list *iter;
1736 	u64 version = 0;
1737 
1738 	list_for_each_entry(iter, match_head, list)
1739 		version += (u64)atomic_read(&iter->g->node.version);
1740 	return version;
1741 }
1742 
1743 static struct fs_fte *
1744 lookup_fte_locked(struct mlx5_flow_group *g,
1745 		  const u32 *match_value,
1746 		  bool take_write)
1747 {
1748 	struct fs_fte *fte_tmp;
1749 
1750 	if (take_write)
1751 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1752 	else
1753 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1754 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1755 					 rhash_fte);
1756 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1757 		fte_tmp = NULL;
1758 		goto out;
1759 	}
1760 	if (!fte_tmp->node.active) {
1761 		tree_put_node(&fte_tmp->node, false);
1762 		fte_tmp = NULL;
1763 		goto out;
1764 	}
1765 
1766 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1767 out:
1768 	if (take_write)
1769 		up_write_ref_node(&g->node, false);
1770 	else
1771 		up_read_ref_node(&g->node);
1772 	return fte_tmp;
1773 }
1774 
1775 static struct mlx5_flow_handle *
1776 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1777 		       struct list_head *match_head,
1778 		       const struct mlx5_flow_spec *spec,
1779 		       struct mlx5_flow_act *flow_act,
1780 		       struct mlx5_flow_destination *dest,
1781 		       int dest_num,
1782 		       int ft_version)
1783 {
1784 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1785 	struct mlx5_flow_group *g;
1786 	struct mlx5_flow_handle *rule;
1787 	struct match_list *iter;
1788 	bool take_write = false;
1789 	struct fs_fte *fte;
1790 	u64  version = 0;
1791 	int err;
1792 
1793 	fte = alloc_fte(ft, spec, flow_act);
1794 	if (IS_ERR(fte))
1795 		return  ERR_PTR(-ENOMEM);
1796 
1797 search_again_locked:
1798 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1799 		goto skip_search;
1800 	version = matched_fgs_get_version(match_head);
1801 	/* Try to find an fte with identical match value and attempt update its
1802 	 * action.
1803 	 */
1804 	list_for_each_entry(iter, match_head, list) {
1805 		struct fs_fte *fte_tmp;
1806 
1807 		g = iter->g;
1808 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1809 		if (!fte_tmp)
1810 			continue;
1811 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1812 		/* No error check needed here, because insert_fte() is not called */
1813 		up_write_ref_node(&fte_tmp->node, false);
1814 		tree_put_node(&fte_tmp->node, false);
1815 		kmem_cache_free(steering->ftes_cache, fte);
1816 		return rule;
1817 	}
1818 
1819 skip_search:
1820 	/* No group with matching fte found, or we skipped the search.
1821 	 * Try to add a new fte to any matching fg.
1822 	 */
1823 
1824 	/* Check the ft version, for case that new flow group
1825 	 * was added while the fgs weren't locked
1826 	 */
1827 	if (atomic_read(&ft->node.version) != ft_version) {
1828 		rule = ERR_PTR(-EAGAIN);
1829 		goto out;
1830 	}
1831 
1832 	/* Check the fgs version. If version have changed it could be that an
1833 	 * FTE with the same match value was added while the fgs weren't
1834 	 * locked.
1835 	 */
1836 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1837 	    version != matched_fgs_get_version(match_head)) {
1838 		take_write = true;
1839 		goto search_again_locked;
1840 	}
1841 
1842 	list_for_each_entry(iter, match_head, list) {
1843 		g = iter->g;
1844 
1845 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1846 
1847 		if (!g->node.active) {
1848 			up_write_ref_node(&g->node, false);
1849 			continue;
1850 		}
1851 
1852 		err = insert_fte(g, fte);
1853 		if (err) {
1854 			up_write_ref_node(&g->node, false);
1855 			if (err == -ENOSPC)
1856 				continue;
1857 			kmem_cache_free(steering->ftes_cache, fte);
1858 			return ERR_PTR(err);
1859 		}
1860 
1861 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1862 		up_write_ref_node(&g->node, false);
1863 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1864 		up_write_ref_node(&fte->node, false);
1865 		if (IS_ERR(rule))
1866 			tree_put_node(&fte->node, false);
1867 		return rule;
1868 	}
1869 	rule = ERR_PTR(-ENOENT);
1870 out:
1871 	kmem_cache_free(steering->ftes_cache, fte);
1872 	return rule;
1873 }
1874 
1875 static struct mlx5_flow_handle *
1876 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1877 		     const struct mlx5_flow_spec *spec,
1878 		     struct mlx5_flow_act *flow_act,
1879 		     struct mlx5_flow_destination *dest,
1880 		     int dest_num)
1881 
1882 {
1883 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1884 	struct mlx5_flow_handle *rule;
1885 	struct match_list match_head;
1886 	struct mlx5_flow_group *g;
1887 	bool take_write = false;
1888 	struct fs_fte *fte;
1889 	int version;
1890 	int err;
1891 	int i;
1892 
1893 	if (!check_valid_spec(spec))
1894 		return ERR_PTR(-EINVAL);
1895 
1896 	if (flow_act->fg && ft->autogroup.active)
1897 		return ERR_PTR(-EINVAL);
1898 
1899 	for (i = 0; i < dest_num; i++) {
1900 		if (!dest_is_valid(&dest[i], flow_act, ft))
1901 			return ERR_PTR(-EINVAL);
1902 	}
1903 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1904 search_again_locked:
1905 	version = atomic_read(&ft->node.version);
1906 
1907 	/* Collect all fgs which has a matching match_criteria */
1908 	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
1909 	if (err) {
1910 		if (take_write)
1911 			up_write_ref_node(&ft->node, false);
1912 		else
1913 			up_read_ref_node(&ft->node);
1914 		return ERR_PTR(err);
1915 	}
1916 
1917 	if (!take_write)
1918 		up_read_ref_node(&ft->node);
1919 
1920 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1921 				      dest_num, version);
1922 	free_match_list(&match_head, take_write);
1923 	if (!IS_ERR(rule) ||
1924 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1925 		if (take_write)
1926 			up_write_ref_node(&ft->node, false);
1927 		return rule;
1928 	}
1929 
1930 	if (!take_write) {
1931 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1932 		take_write = true;
1933 	}
1934 
1935 	if (PTR_ERR(rule) == -EAGAIN ||
1936 	    version != atomic_read(&ft->node.version))
1937 		goto search_again_locked;
1938 
1939 	g = alloc_auto_flow_group(ft, spec);
1940 	if (IS_ERR(g)) {
1941 		rule = ERR_CAST(g);
1942 		up_write_ref_node(&ft->node, false);
1943 		return rule;
1944 	}
1945 
1946 	fte = alloc_fte(ft, spec, flow_act);
1947 	if (IS_ERR(fte)) {
1948 		up_write_ref_node(&ft->node, false);
1949 		err = PTR_ERR(fte);
1950 		goto err_alloc_fte;
1951 	}
1952 
1953 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1954 	up_write_ref_node(&ft->node, false);
1955 
1956 	err = create_auto_flow_group(ft, g);
1957 	if (err)
1958 		goto err_release_fg;
1959 
1960 	err = insert_fte(g, fte);
1961 	if (err)
1962 		goto err_release_fg;
1963 
1964 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1965 	up_write_ref_node(&g->node, false);
1966 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1967 	up_write_ref_node(&fte->node, false);
1968 	if (IS_ERR(rule))
1969 		tree_put_node(&fte->node, false);
1970 	tree_put_node(&g->node, false);
1971 	return rule;
1972 
1973 err_release_fg:
1974 	up_write_ref_node(&g->node, false);
1975 	kmem_cache_free(steering->ftes_cache, fte);
1976 err_alloc_fte:
1977 	tree_put_node(&g->node, false);
1978 	return ERR_PTR(err);
1979 }
1980 
1981 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1982 {
1983 	return ((ft->type == FS_FT_NIC_RX) &&
1984 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1985 }
1986 
1987 struct mlx5_flow_handle *
1988 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1989 		    const struct mlx5_flow_spec *spec,
1990 		    struct mlx5_flow_act *flow_act,
1991 		    struct mlx5_flow_destination *dest,
1992 		    int num_dest)
1993 {
1994 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1995 	static const struct mlx5_flow_spec zero_spec = {};
1996 	struct mlx5_flow_destination *gen_dest = NULL;
1997 	struct mlx5_flow_table *next_ft = NULL;
1998 	struct mlx5_flow_handle *handle = NULL;
1999 	u32 sw_action = flow_act->action;
2000 	int i;
2001 
2002 	if (!spec)
2003 		spec = &zero_spec;
2004 
2005 	if (!is_fwd_next_action(sw_action))
2006 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2007 
2008 	if (!fwd_next_prio_supported(ft))
2009 		return ERR_PTR(-EOPNOTSUPP);
2010 
2011 	mutex_lock(&root->chain_lock);
2012 	next_ft = find_next_fwd_ft(ft, flow_act);
2013 	if (!next_ft) {
2014 		handle = ERR_PTR(-EOPNOTSUPP);
2015 		goto unlock;
2016 	}
2017 
2018 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2019 			   GFP_KERNEL);
2020 	if (!gen_dest) {
2021 		handle = ERR_PTR(-ENOMEM);
2022 		goto unlock;
2023 	}
2024 	for (i = 0; i < num_dest; i++)
2025 		gen_dest[i] = dest[i];
2026 	gen_dest[i].type =
2027 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2028 	gen_dest[i].ft = next_ft;
2029 	dest = gen_dest;
2030 	num_dest++;
2031 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2032 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2033 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2034 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2035 	if (IS_ERR(handle))
2036 		goto unlock;
2037 
2038 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2039 		mutex_lock(&next_ft->lock);
2040 		list_add(&handle->rule[num_dest - 1]->next_ft,
2041 			 &next_ft->fwd_rules);
2042 		mutex_unlock(&next_ft->lock);
2043 		handle->rule[num_dest - 1]->sw_action = sw_action;
2044 		handle->rule[num_dest - 1]->ft = ft;
2045 	}
2046 unlock:
2047 	mutex_unlock(&root->chain_lock);
2048 	kfree(gen_dest);
2049 	return handle;
2050 }
2051 EXPORT_SYMBOL(mlx5_add_flow_rules);
2052 
2053 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2054 {
2055 	struct fs_fte *fte;
2056 	int i;
2057 
2058 	/* In order to consolidate the HW changes we lock the FTE for other
2059 	 * changes, and increase its refcount, in order not to perform the
2060 	 * "del" functions of the FTE. Will handle them here.
2061 	 * The removal of the rules is done under locked FTE.
2062 	 * After removing all the handle's rules, if there are remaining
2063 	 * rules, it means we just need to modify the FTE in FW, and
2064 	 * unlock/decrease the refcount we increased before.
2065 	 * Otherwise, it means the FTE should be deleted. First delete the
2066 	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
2067 	 * the FTE, which will handle the last decrease of the refcount, as
2068 	 * well as required handling of its parent.
2069 	 */
2070 	fs_get_obj(fte, handle->rule[0]->node.parent);
2071 	down_write_ref_node(&fte->node, false);
2072 	for (i = handle->num_rules - 1; i >= 0; i--)
2073 		tree_remove_node(&handle->rule[i]->node, true);
2074 	if (fte->dests_size) {
2075 		if (fte->modify_mask)
2076 			modify_fte(fte);
2077 		up_write_ref_node(&fte->node, false);
2078 	} else if (list_empty(&fte->node.children)) {
2079 		del_hw_fte(&fte->node);
2080 		/* Avoid double call to del_hw_fte */
2081 		fte->node.del_hw_func = NULL;
2082 		up_write_ref_node(&fte->node, false);
2083 		tree_put_node(&fte->node, false);
2084 	} else {
2085 		up_write_ref_node(&fte->node, false);
2086 	}
2087 	kfree(handle);
2088 }
2089 EXPORT_SYMBOL(mlx5_del_flow_rules);
2090 
2091 /* Assuming prio->node.children(flow tables) is sorted by level */
2092 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2093 {
2094 	struct fs_prio *prio;
2095 
2096 	fs_get_obj(prio, ft->node.parent);
2097 
2098 	if (!list_is_last(&ft->node.list, &prio->node.children))
2099 		return list_next_entry(ft, node.list);
2100 	return find_next_chained_ft(prio);
2101 }
2102 
2103 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2104 {
2105 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2106 	struct mlx5_ft_underlay_qp *uqp;
2107 	struct mlx5_flow_table *new_root_ft = NULL;
2108 	int err = 0;
2109 	u32 qpn;
2110 
2111 	if (root->root_ft != ft)
2112 		return 0;
2113 
2114 	new_root_ft = find_next_ft(ft);
2115 	if (!new_root_ft) {
2116 		root->root_ft = NULL;
2117 		return 0;
2118 	}
2119 
2120 	if (list_empty(&root->underlay_qpns)) {
2121 		/* Don't set any QPN (zero) in case QPN list is empty */
2122 		qpn = 0;
2123 		err = root->cmds->update_root_ft(root, new_root_ft,
2124 						 qpn, false);
2125 	} else {
2126 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2127 			qpn = uqp->qpn;
2128 			err = root->cmds->update_root_ft(root,
2129 							 new_root_ft, qpn,
2130 							 false);
2131 			if (err)
2132 				break;
2133 		}
2134 	}
2135 
2136 	if (err)
2137 		mlx5_core_warn(root->dev,
2138 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2139 			       ft->id, qpn);
2140 	else
2141 		root->root_ft = new_root_ft;
2142 
2143 	return 0;
2144 }
2145 
2146 /* Connect flow table from previous priority to
2147  * the next flow table.
2148  */
2149 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2150 {
2151 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2152 	struct mlx5_flow_table *next_ft;
2153 	struct fs_prio *prio;
2154 	int err = 0;
2155 
2156 	err = update_root_ft_destroy(ft);
2157 	if (err)
2158 		return err;
2159 
2160 	fs_get_obj(prio, ft->node.parent);
2161 	if  (!(list_first_entry(&prio->node.children,
2162 				struct mlx5_flow_table,
2163 				node.list) == ft))
2164 		return 0;
2165 
2166 	next_ft = find_next_ft(ft);
2167 	err = connect_fwd_rules(dev, next_ft, ft);
2168 	if (err)
2169 		return err;
2170 
2171 	err = connect_prev_fts(dev, next_ft, prio);
2172 	if (err)
2173 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2174 			       ft->id);
2175 	return err;
2176 }
2177 
2178 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2179 {
2180 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2181 	int err = 0;
2182 
2183 	mutex_lock(&root->chain_lock);
2184 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2185 		err = disconnect_flow_table(ft);
2186 	if (err) {
2187 		mutex_unlock(&root->chain_lock);
2188 		return err;
2189 	}
2190 	if (tree_remove_node(&ft->node, false))
2191 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2192 			       ft->id);
2193 	mutex_unlock(&root->chain_lock);
2194 
2195 	return err;
2196 }
2197 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2198 
2199 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2200 {
2201 	if (tree_remove_node(&fg->node, false))
2202 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2203 			       fg->id);
2204 }
2205 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2206 
2207 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2208 						int n)
2209 {
2210 	struct mlx5_flow_steering *steering = dev->priv.steering;
2211 
2212 	if (!steering || !steering->fdb_sub_ns)
2213 		return NULL;
2214 
2215 	return steering->fdb_sub_ns[n];
2216 }
2217 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2218 
2219 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2220 {
2221 	switch (type) {
2222 	case MLX5_FLOW_NAMESPACE_BYPASS:
2223 	case MLX5_FLOW_NAMESPACE_LAG:
2224 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
2225 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
2226 	case MLX5_FLOW_NAMESPACE_KERNEL:
2227 	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2228 	case MLX5_FLOW_NAMESPACE_ANCHOR:
2229 		return true;
2230 	default:
2231 		return false;
2232 	}
2233 }
2234 
2235 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2236 						    enum mlx5_flow_namespace_type type)
2237 {
2238 	struct mlx5_flow_steering *steering = dev->priv.steering;
2239 	struct mlx5_flow_root_namespace *root_ns;
2240 	int prio = 0;
2241 	struct fs_prio *fs_prio;
2242 	struct mlx5_flow_namespace *ns;
2243 
2244 	if (!steering)
2245 		return NULL;
2246 
2247 	switch (type) {
2248 	case MLX5_FLOW_NAMESPACE_FDB:
2249 		if (steering->fdb_root_ns)
2250 			return &steering->fdb_root_ns->ns;
2251 		return NULL;
2252 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
2253 		if (steering->port_sel_root_ns)
2254 			return &steering->port_sel_root_ns->ns;
2255 		return NULL;
2256 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2257 		if (steering->sniffer_rx_root_ns)
2258 			return &steering->sniffer_rx_root_ns->ns;
2259 		return NULL;
2260 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2261 		if (steering->sniffer_tx_root_ns)
2262 			return &steering->sniffer_tx_root_ns->ns;
2263 		return NULL;
2264 	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2265 		root_ns = steering->fdb_root_ns;
2266 		prio =  FDB_BYPASS_PATH;
2267 		break;
2268 	case MLX5_FLOW_NAMESPACE_EGRESS:
2269 	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
2270 		root_ns = steering->egress_root_ns;
2271 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2272 		break;
2273 	case MLX5_FLOW_NAMESPACE_RDMA_RX:
2274 		root_ns = steering->rdma_rx_root_ns;
2275 		prio = RDMA_RX_BYPASS_PRIO;
2276 		break;
2277 	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2278 		root_ns = steering->rdma_rx_root_ns;
2279 		prio = RDMA_RX_KERNEL_PRIO;
2280 		break;
2281 	case MLX5_FLOW_NAMESPACE_RDMA_TX:
2282 		root_ns = steering->rdma_tx_root_ns;
2283 		break;
2284 	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2285 		root_ns = steering->rdma_rx_root_ns;
2286 		prio = RDMA_RX_COUNTERS_PRIO;
2287 		break;
2288 	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2289 		root_ns = steering->rdma_tx_root_ns;
2290 		prio = RDMA_TX_COUNTERS_PRIO;
2291 		break;
2292 	default: /* Must be NIC RX */
2293 		WARN_ON(!is_nic_rx_ns(type));
2294 		root_ns = steering->root_ns;
2295 		prio = type;
2296 		break;
2297 	}
2298 
2299 	if (!root_ns)
2300 		return NULL;
2301 
2302 	fs_prio = find_prio(&root_ns->ns, prio);
2303 	if (!fs_prio)
2304 		return NULL;
2305 
2306 	ns = list_first_entry(&fs_prio->node.children,
2307 			      typeof(*ns),
2308 			      node.list);
2309 
2310 	return ns;
2311 }
2312 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2313 
2314 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2315 							      enum mlx5_flow_namespace_type type,
2316 							      int vport)
2317 {
2318 	struct mlx5_flow_steering *steering = dev->priv.steering;
2319 
2320 	if (!steering)
2321 		return NULL;
2322 
2323 	switch (type) {
2324 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2325 		if (vport >= steering->esw_egress_acl_vports)
2326 			return NULL;
2327 		if (steering->esw_egress_root_ns &&
2328 		    steering->esw_egress_root_ns[vport])
2329 			return &steering->esw_egress_root_ns[vport]->ns;
2330 		else
2331 			return NULL;
2332 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2333 		if (vport >= steering->esw_ingress_acl_vports)
2334 			return NULL;
2335 		if (steering->esw_ingress_root_ns &&
2336 		    steering->esw_ingress_root_ns[vport])
2337 			return &steering->esw_ingress_root_ns[vport]->ns;
2338 		else
2339 			return NULL;
2340 	default:
2341 		return NULL;
2342 	}
2343 }
2344 
2345 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2346 				       unsigned int prio,
2347 				       int num_levels,
2348 				       enum fs_node_type type)
2349 {
2350 	struct fs_prio *fs_prio;
2351 
2352 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2353 	if (!fs_prio)
2354 		return ERR_PTR(-ENOMEM);
2355 
2356 	fs_prio->node.type = type;
2357 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2358 	tree_add_node(&fs_prio->node, &ns->node);
2359 	fs_prio->num_levels = num_levels;
2360 	fs_prio->prio = prio;
2361 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2362 
2363 	return fs_prio;
2364 }
2365 
2366 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2367 					      unsigned int prio,
2368 					      int num_levels)
2369 {
2370 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2371 }
2372 
2373 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2374 				      unsigned int prio, int num_levels)
2375 {
2376 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2377 }
2378 
2379 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2380 						     *ns)
2381 {
2382 	ns->node.type = FS_TYPE_NAMESPACE;
2383 
2384 	return ns;
2385 }
2386 
2387 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2388 						       int def_miss_act)
2389 {
2390 	struct mlx5_flow_namespace	*ns;
2391 
2392 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2393 	if (!ns)
2394 		return ERR_PTR(-ENOMEM);
2395 
2396 	fs_init_namespace(ns);
2397 	ns->def_miss_action = def_miss_act;
2398 	tree_init_node(&ns->node, NULL, del_sw_ns);
2399 	tree_add_node(&ns->node, &prio->node);
2400 	list_add_tail(&ns->node.list, &prio->node.children);
2401 
2402 	return ns;
2403 }
2404 
2405 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2406 			     struct init_tree_node *prio_metadata)
2407 {
2408 	struct fs_prio *fs_prio;
2409 	int i;
2410 
2411 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2412 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2413 		if (IS_ERR(fs_prio))
2414 			return PTR_ERR(fs_prio);
2415 	}
2416 	return 0;
2417 }
2418 
2419 #define FLOW_TABLE_BIT_SZ 1
2420 #define GET_FLOW_TABLE_CAP(dev, offset) \
2421 	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
2422 			offset / 32)) >>					\
2423 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2424 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2425 {
2426 	int i;
2427 
2428 	for (i = 0; i < caps->arr_sz; i++) {
2429 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2430 			return false;
2431 	}
2432 	return true;
2433 }
2434 
2435 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2436 				    struct init_tree_node *init_node,
2437 				    struct fs_node *fs_parent_node,
2438 				    struct init_tree_node *init_parent_node,
2439 				    int prio)
2440 {
2441 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2442 					      flow_table_properties_nic_receive.
2443 					      max_ft_level);
2444 	struct mlx5_flow_namespace *fs_ns;
2445 	struct fs_prio *fs_prio;
2446 	struct fs_node *base;
2447 	int i;
2448 	int err;
2449 
2450 	if (init_node->type == FS_TYPE_PRIO) {
2451 		if ((init_node->min_ft_level > max_ft_level) ||
2452 		    !has_required_caps(steering->dev, &init_node->caps))
2453 			return 0;
2454 
2455 		fs_get_obj(fs_ns, fs_parent_node);
2456 		if (init_node->num_leaf_prios)
2457 			return create_leaf_prios(fs_ns, prio, init_node);
2458 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2459 		if (IS_ERR(fs_prio))
2460 			return PTR_ERR(fs_prio);
2461 		base = &fs_prio->node;
2462 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2463 		fs_get_obj(fs_prio, fs_parent_node);
2464 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2465 		if (IS_ERR(fs_ns))
2466 			return PTR_ERR(fs_ns);
2467 		base = &fs_ns->node;
2468 	} else {
2469 		return -EINVAL;
2470 	}
2471 	prio = 0;
2472 	for (i = 0; i < init_node->ar_size; i++) {
2473 		err = init_root_tree_recursive(steering, &init_node->children[i],
2474 					       base, init_node, prio);
2475 		if (err)
2476 			return err;
2477 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2478 		    init_node->children[i].num_leaf_prios) {
2479 			prio += init_node->children[i].num_leaf_prios;
2480 		}
2481 	}
2482 
2483 	return 0;
2484 }
2485 
2486 static int init_root_tree(struct mlx5_flow_steering *steering,
2487 			  struct init_tree_node *init_node,
2488 			  struct fs_node *fs_parent_node)
2489 {
2490 	int err;
2491 	int i;
2492 
2493 	for (i = 0; i < init_node->ar_size; i++) {
2494 		err = init_root_tree_recursive(steering, &init_node->children[i],
2495 					       fs_parent_node,
2496 					       init_node, i);
2497 		if (err)
2498 			return err;
2499 	}
2500 	return 0;
2501 }
2502 
2503 static void del_sw_root_ns(struct fs_node *node)
2504 {
2505 	struct mlx5_flow_root_namespace *root_ns;
2506 	struct mlx5_flow_namespace *ns;
2507 
2508 	fs_get_obj(ns, node);
2509 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2510 	mutex_destroy(&root_ns->chain_lock);
2511 	kfree(node);
2512 }
2513 
2514 static struct mlx5_flow_root_namespace
2515 *create_root_ns(struct mlx5_flow_steering *steering,
2516 		enum fs_flow_table_type table_type)
2517 {
2518 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2519 	struct mlx5_flow_root_namespace *root_ns;
2520 	struct mlx5_flow_namespace *ns;
2521 
2522 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2523 	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2524 		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2525 
2526 	/* Create the root namespace */
2527 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2528 	if (!root_ns)
2529 		return NULL;
2530 
2531 	root_ns->dev = steering->dev;
2532 	root_ns->table_type = table_type;
2533 	root_ns->cmds = cmds;
2534 
2535 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2536 
2537 	ns = &root_ns->ns;
2538 	fs_init_namespace(ns);
2539 	mutex_init(&root_ns->chain_lock);
2540 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2541 	tree_add_node(&ns->node, NULL);
2542 
2543 	return root_ns;
2544 }
2545 
2546 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2547 
2548 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2549 {
2550 	struct fs_prio *prio;
2551 
2552 	fs_for_each_prio(prio, ns) {
2553 		 /* This updates prio start_level and num_levels */
2554 		set_prio_attrs_in_prio(prio, acc_level);
2555 		acc_level += prio->num_levels;
2556 	}
2557 	return acc_level;
2558 }
2559 
2560 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2561 {
2562 	struct mlx5_flow_namespace *ns;
2563 	int acc_level_ns = acc_level;
2564 
2565 	prio->start_level = acc_level;
2566 	fs_for_each_ns(ns, prio) {
2567 		/* This updates start_level and num_levels of ns's priority descendants */
2568 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2569 
2570 		/* If this a prio with chains, and we can jump from one chain
2571 		 * (namespace) to another, so we accumulate the levels
2572 		 */
2573 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2574 			acc_level = acc_level_ns;
2575 	}
2576 
2577 	if (!prio->num_levels)
2578 		prio->num_levels = acc_level_ns - prio->start_level;
2579 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2580 }
2581 
2582 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2583 {
2584 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2585 	struct fs_prio *prio;
2586 	int start_level = 0;
2587 
2588 	fs_for_each_prio(prio, ns) {
2589 		set_prio_attrs_in_prio(prio, start_level);
2590 		start_level += prio->num_levels;
2591 	}
2592 }
2593 
2594 #define ANCHOR_PRIO 0
2595 #define ANCHOR_SIZE 1
2596 #define ANCHOR_LEVEL 0
2597 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2598 {
2599 	struct mlx5_flow_namespace *ns = NULL;
2600 	struct mlx5_flow_table_attr ft_attr = {};
2601 	struct mlx5_flow_table *ft;
2602 
2603 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2604 	if (WARN_ON(!ns))
2605 		return -EINVAL;
2606 
2607 	ft_attr.max_fte = ANCHOR_SIZE;
2608 	ft_attr.level   = ANCHOR_LEVEL;
2609 	ft_attr.prio    = ANCHOR_PRIO;
2610 
2611 	ft = mlx5_create_flow_table(ns, &ft_attr);
2612 	if (IS_ERR(ft)) {
2613 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2614 		return PTR_ERR(ft);
2615 	}
2616 	return 0;
2617 }
2618 
2619 static int init_root_ns(struct mlx5_flow_steering *steering)
2620 {
2621 	int err;
2622 
2623 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2624 	if (!steering->root_ns)
2625 		return -ENOMEM;
2626 
2627 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2628 	if (err)
2629 		goto out_err;
2630 
2631 	set_prio_attrs(steering->root_ns);
2632 	err = create_anchor_flow_table(steering);
2633 	if (err)
2634 		goto out_err;
2635 
2636 	return 0;
2637 
2638 out_err:
2639 	cleanup_root_ns(steering->root_ns);
2640 	steering->root_ns = NULL;
2641 	return err;
2642 }
2643 
2644 static void clean_tree(struct fs_node *node)
2645 {
2646 	if (node) {
2647 		struct fs_node *iter;
2648 		struct fs_node *temp;
2649 
2650 		tree_get_node(node);
2651 		list_for_each_entry_safe(iter, temp, &node->children, list)
2652 			clean_tree(iter);
2653 		tree_put_node(node, false);
2654 		tree_remove_node(node, false);
2655 	}
2656 }
2657 
2658 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2659 {
2660 	if (!root_ns)
2661 		return;
2662 
2663 	clean_tree(&root_ns->ns.node);
2664 }
2665 
2666 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2667 {
2668 	struct mlx5_flow_steering *steering = dev->priv.steering;
2669 
2670 	cleanup_root_ns(steering->root_ns);
2671 	cleanup_root_ns(steering->fdb_root_ns);
2672 	steering->fdb_root_ns = NULL;
2673 	kfree(steering->fdb_sub_ns);
2674 	steering->fdb_sub_ns = NULL;
2675 	cleanup_root_ns(steering->port_sel_root_ns);
2676 	cleanup_root_ns(steering->sniffer_rx_root_ns);
2677 	cleanup_root_ns(steering->sniffer_tx_root_ns);
2678 	cleanup_root_ns(steering->rdma_rx_root_ns);
2679 	cleanup_root_ns(steering->rdma_tx_root_ns);
2680 	cleanup_root_ns(steering->egress_root_ns);
2681 	mlx5_cleanup_fc_stats(dev);
2682 	kmem_cache_destroy(steering->ftes_cache);
2683 	kmem_cache_destroy(steering->fgs_cache);
2684 	mlx5_ft_pool_destroy(dev);
2685 	kfree(steering);
2686 }
2687 
2688 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2689 {
2690 	struct fs_prio *prio;
2691 
2692 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2693 	if (!steering->sniffer_tx_root_ns)
2694 		return -ENOMEM;
2695 
2696 	/* Create single prio */
2697 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2698 	return PTR_ERR_OR_ZERO(prio);
2699 }
2700 
2701 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2702 {
2703 	struct fs_prio *prio;
2704 
2705 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2706 	if (!steering->sniffer_rx_root_ns)
2707 		return -ENOMEM;
2708 
2709 	/* Create single prio */
2710 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2711 	return PTR_ERR_OR_ZERO(prio);
2712 }
2713 
2714 #define PORT_SEL_NUM_LEVELS 3
2715 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2716 {
2717 	struct fs_prio *prio;
2718 
2719 	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2720 	if (!steering->port_sel_root_ns)
2721 		return -ENOMEM;
2722 
2723 	/* Create single prio */
2724 	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2725 			      PORT_SEL_NUM_LEVELS);
2726 	return PTR_ERR_OR_ZERO(prio);
2727 }
2728 
2729 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2730 {
2731 	int err;
2732 
2733 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2734 	if (!steering->rdma_rx_root_ns)
2735 		return -ENOMEM;
2736 
2737 	err = init_root_tree(steering, &rdma_rx_root_fs,
2738 			     &steering->rdma_rx_root_ns->ns.node);
2739 	if (err)
2740 		goto out_err;
2741 
2742 	set_prio_attrs(steering->rdma_rx_root_ns);
2743 
2744 	return 0;
2745 
2746 out_err:
2747 	cleanup_root_ns(steering->rdma_rx_root_ns);
2748 	steering->rdma_rx_root_ns = NULL;
2749 	return err;
2750 }
2751 
2752 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2753 {
2754 	int err;
2755 
2756 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2757 	if (!steering->rdma_tx_root_ns)
2758 		return -ENOMEM;
2759 
2760 	err = init_root_tree(steering, &rdma_tx_root_fs,
2761 			     &steering->rdma_tx_root_ns->ns.node);
2762 	if (err)
2763 		goto out_err;
2764 
2765 	set_prio_attrs(steering->rdma_tx_root_ns);
2766 
2767 	return 0;
2768 
2769 out_err:
2770 	cleanup_root_ns(steering->rdma_tx_root_ns);
2771 	steering->rdma_tx_root_ns = NULL;
2772 	return err;
2773 }
2774 
2775 /* FT and tc chains are stored in the same array so we can re-use the
2776  * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2777  * When creating a new ns for each chain store it in the first available slot.
2778  * Assume tc chains are created and stored first and only then the FT chain.
2779  */
2780 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2781 					struct mlx5_flow_namespace *ns)
2782 {
2783 	int chain = 0;
2784 
2785 	while (steering->fdb_sub_ns[chain])
2786 		++chain;
2787 
2788 	steering->fdb_sub_ns[chain] = ns;
2789 }
2790 
2791 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2792 					struct fs_prio *maj_prio)
2793 {
2794 	struct mlx5_flow_namespace *ns;
2795 	struct fs_prio *min_prio;
2796 	int prio;
2797 
2798 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2799 	if (IS_ERR(ns))
2800 		return PTR_ERR(ns);
2801 
2802 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2803 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2804 		if (IS_ERR(min_prio))
2805 			return PTR_ERR(min_prio);
2806 	}
2807 
2808 	store_fdb_sub_ns_prio_chain(steering, ns);
2809 
2810 	return 0;
2811 }
2812 
2813 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2814 			     int fs_prio,
2815 			     int chains)
2816 {
2817 	struct fs_prio *maj_prio;
2818 	int levels;
2819 	int chain;
2820 	int err;
2821 
2822 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2823 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2824 					  fs_prio,
2825 					  levels);
2826 	if (IS_ERR(maj_prio))
2827 		return PTR_ERR(maj_prio);
2828 
2829 	for (chain = 0; chain < chains; chain++) {
2830 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2831 		if (err)
2832 			return err;
2833 	}
2834 
2835 	return 0;
2836 }
2837 
2838 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2839 {
2840 	int err;
2841 
2842 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2843 				       sizeof(*steering->fdb_sub_ns),
2844 				       GFP_KERNEL);
2845 	if (!steering->fdb_sub_ns)
2846 		return -ENOMEM;
2847 
2848 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2849 	if (err)
2850 		return err;
2851 
2852 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2853 	if (err)
2854 		return err;
2855 
2856 	return 0;
2857 }
2858 
2859 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2860 {
2861 	struct mlx5_flow_namespace *ns;
2862 	struct fs_prio *prio;
2863 	int i;
2864 
2865 	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2866 	if (IS_ERR(prio))
2867 		return PTR_ERR(prio);
2868 
2869 	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2870 	if (IS_ERR(ns))
2871 		return PTR_ERR(ns);
2872 
2873 	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2874 		prio = fs_create_prio(ns, i, 1);
2875 		if (IS_ERR(prio))
2876 			return PTR_ERR(prio);
2877 	}
2878 	return 0;
2879 }
2880 
2881 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2882 {
2883 	struct fs_prio *maj_prio;
2884 	int err;
2885 
2886 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2887 	if (!steering->fdb_root_ns)
2888 		return -ENOMEM;
2889 
2890 	err = create_fdb_bypass(steering);
2891 	if (err)
2892 		goto out_err;
2893 
2894 	err = create_fdb_fast_path(steering);
2895 	if (err)
2896 		goto out_err;
2897 
2898 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2899 	if (IS_ERR(maj_prio)) {
2900 		err = PTR_ERR(maj_prio);
2901 		goto out_err;
2902 	}
2903 
2904 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2905 	if (IS_ERR(maj_prio)) {
2906 		err = PTR_ERR(maj_prio);
2907 		goto out_err;
2908 	}
2909 
2910 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2911 	if (IS_ERR(maj_prio)) {
2912 		err = PTR_ERR(maj_prio);
2913 		goto out_err;
2914 	}
2915 
2916 	/* We put this priority last, knowing that nothing will get here
2917 	 * unless explicitly forwarded to. This is possible because the
2918 	 * slow path tables have catch all rules and nothing gets passed
2919 	 * those tables.
2920 	 */
2921 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2922 	if (IS_ERR(maj_prio)) {
2923 		err = PTR_ERR(maj_prio);
2924 		goto out_err;
2925 	}
2926 
2927 	set_prio_attrs(steering->fdb_root_ns);
2928 	return 0;
2929 
2930 out_err:
2931 	cleanup_root_ns(steering->fdb_root_ns);
2932 	kfree(steering->fdb_sub_ns);
2933 	steering->fdb_sub_ns = NULL;
2934 	steering->fdb_root_ns = NULL;
2935 	return err;
2936 }
2937 
2938 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2939 {
2940 	struct fs_prio *prio;
2941 
2942 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2943 	if (!steering->esw_egress_root_ns[vport])
2944 		return -ENOMEM;
2945 
2946 	/* create 1 prio*/
2947 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2948 	return PTR_ERR_OR_ZERO(prio);
2949 }
2950 
2951 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2952 {
2953 	struct fs_prio *prio;
2954 
2955 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2956 	if (!steering->esw_ingress_root_ns[vport])
2957 		return -ENOMEM;
2958 
2959 	/* create 1 prio*/
2960 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2961 	return PTR_ERR_OR_ZERO(prio);
2962 }
2963 
2964 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2965 {
2966 	struct mlx5_flow_steering *steering = dev->priv.steering;
2967 	int err;
2968 	int i;
2969 
2970 	steering->esw_egress_root_ns =
2971 			kcalloc(total_vports,
2972 				sizeof(*steering->esw_egress_root_ns),
2973 				GFP_KERNEL);
2974 	if (!steering->esw_egress_root_ns)
2975 		return -ENOMEM;
2976 
2977 	for (i = 0; i < total_vports; i++) {
2978 		err = init_egress_acl_root_ns(steering, i);
2979 		if (err)
2980 			goto cleanup_root_ns;
2981 	}
2982 	steering->esw_egress_acl_vports = total_vports;
2983 	return 0;
2984 
2985 cleanup_root_ns:
2986 	for (i--; i >= 0; i--)
2987 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
2988 	kfree(steering->esw_egress_root_ns);
2989 	steering->esw_egress_root_ns = NULL;
2990 	return err;
2991 }
2992 
2993 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
2994 {
2995 	struct mlx5_flow_steering *steering = dev->priv.steering;
2996 	int i;
2997 
2998 	if (!steering->esw_egress_root_ns)
2999 		return;
3000 
3001 	for (i = 0; i < steering->esw_egress_acl_vports; i++)
3002 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3003 
3004 	kfree(steering->esw_egress_root_ns);
3005 	steering->esw_egress_root_ns = NULL;
3006 }
3007 
3008 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3009 {
3010 	struct mlx5_flow_steering *steering = dev->priv.steering;
3011 	int err;
3012 	int i;
3013 
3014 	steering->esw_ingress_root_ns =
3015 			kcalloc(total_vports,
3016 				sizeof(*steering->esw_ingress_root_ns),
3017 				GFP_KERNEL);
3018 	if (!steering->esw_ingress_root_ns)
3019 		return -ENOMEM;
3020 
3021 	for (i = 0; i < total_vports; i++) {
3022 		err = init_ingress_acl_root_ns(steering, i);
3023 		if (err)
3024 			goto cleanup_root_ns;
3025 	}
3026 	steering->esw_ingress_acl_vports = total_vports;
3027 	return 0;
3028 
3029 cleanup_root_ns:
3030 	for (i--; i >= 0; i--)
3031 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3032 	kfree(steering->esw_ingress_root_ns);
3033 	steering->esw_ingress_root_ns = NULL;
3034 	return err;
3035 }
3036 
3037 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3038 {
3039 	struct mlx5_flow_steering *steering = dev->priv.steering;
3040 	int i;
3041 
3042 	if (!steering->esw_ingress_root_ns)
3043 		return;
3044 
3045 	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3046 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3047 
3048 	kfree(steering->esw_ingress_root_ns);
3049 	steering->esw_ingress_root_ns = NULL;
3050 }
3051 
3052 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3053 {
3054 	struct mlx5_flow_root_namespace *root;
3055 	struct mlx5_flow_namespace *ns;
3056 
3057 	ns = mlx5_get_flow_namespace(dev, type);
3058 	if (!ns)
3059 		return 0;
3060 
3061 	root = find_root(&ns->node);
3062 	if (!root)
3063 		return 0;
3064 
3065 	return root->cmds->get_capabilities(root, root->table_type);
3066 }
3067 
3068 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3069 {
3070 	int err;
3071 
3072 	steering->egress_root_ns = create_root_ns(steering,
3073 						  FS_FT_NIC_TX);
3074 	if (!steering->egress_root_ns)
3075 		return -ENOMEM;
3076 
3077 	err = init_root_tree(steering, &egress_root_fs,
3078 			     &steering->egress_root_ns->ns.node);
3079 	if (err)
3080 		goto cleanup;
3081 	set_prio_attrs(steering->egress_root_ns);
3082 	return 0;
3083 cleanup:
3084 	cleanup_root_ns(steering->egress_root_ns);
3085 	steering->egress_root_ns = NULL;
3086 	return err;
3087 }
3088 
3089 int mlx5_init_fs(struct mlx5_core_dev *dev)
3090 {
3091 	struct mlx5_flow_steering *steering;
3092 	int err = 0;
3093 
3094 	err = mlx5_init_fc_stats(dev);
3095 	if (err)
3096 		return err;
3097 
3098 	err = mlx5_ft_pool_init(dev);
3099 	if (err)
3100 		return err;
3101 
3102 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3103 	if (!steering) {
3104 		err = -ENOMEM;
3105 		goto err;
3106 	}
3107 
3108 	steering->dev = dev;
3109 	dev->priv.steering = steering;
3110 
3111 	if (mlx5_fs_dr_is_supported(dev))
3112 		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
3113 	else
3114 		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
3115 
3116 	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
3117 						sizeof(struct mlx5_flow_group), 0,
3118 						0, NULL);
3119 	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
3120 						 0, NULL);
3121 	if (!steering->ftes_cache || !steering->fgs_cache) {
3122 		err = -ENOMEM;
3123 		goto err;
3124 	}
3125 
3126 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3127 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3128 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3129 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3130 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3131 		err = init_root_ns(steering);
3132 		if (err)
3133 			goto err;
3134 	}
3135 
3136 	if (MLX5_ESWITCH_MANAGER(dev)) {
3137 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3138 			err = init_fdb_root_ns(steering);
3139 			if (err)
3140 				goto err;
3141 		}
3142 	}
3143 
3144 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3145 		err = init_sniffer_rx_root_ns(steering);
3146 		if (err)
3147 			goto err;
3148 	}
3149 
3150 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3151 		err = init_sniffer_tx_root_ns(steering);
3152 		if (err)
3153 			goto err;
3154 	}
3155 
3156 	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3157 		err = init_port_sel_root_ns(steering);
3158 		if (err)
3159 			goto err;
3160 	}
3161 
3162 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3163 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3164 		err = init_rdma_rx_root_ns(steering);
3165 		if (err)
3166 			goto err;
3167 	}
3168 
3169 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3170 		err = init_rdma_tx_root_ns(steering);
3171 		if (err)
3172 			goto err;
3173 	}
3174 
3175 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3176 	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3177 		err = init_egress_root_ns(steering);
3178 		if (err)
3179 			goto err;
3180 	}
3181 
3182 	return 0;
3183 err:
3184 	mlx5_cleanup_fs(dev);
3185 	return err;
3186 }
3187 
3188 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3189 {
3190 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3191 	struct mlx5_ft_underlay_qp *new_uqp;
3192 	int err = 0;
3193 
3194 	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3195 	if (!new_uqp)
3196 		return -ENOMEM;
3197 
3198 	mutex_lock(&root->chain_lock);
3199 
3200 	if (!root->root_ft) {
3201 		err = -EINVAL;
3202 		goto update_ft_fail;
3203 	}
3204 
3205 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3206 					 false);
3207 	if (err) {
3208 		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3209 			       underlay_qpn, err);
3210 		goto update_ft_fail;
3211 	}
3212 
3213 	new_uqp->qpn = underlay_qpn;
3214 	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3215 
3216 	mutex_unlock(&root->chain_lock);
3217 
3218 	return 0;
3219 
3220 update_ft_fail:
3221 	mutex_unlock(&root->chain_lock);
3222 	kfree(new_uqp);
3223 	return err;
3224 }
3225 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3226 
3227 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3228 {
3229 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3230 	struct mlx5_ft_underlay_qp *uqp;
3231 	bool found = false;
3232 	int err = 0;
3233 
3234 	mutex_lock(&root->chain_lock);
3235 	list_for_each_entry(uqp, &root->underlay_qpns, list) {
3236 		if (uqp->qpn == underlay_qpn) {
3237 			found = true;
3238 			break;
3239 		}
3240 	}
3241 
3242 	if (!found) {
3243 		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3244 			       underlay_qpn);
3245 		err = -EINVAL;
3246 		goto out;
3247 	}
3248 
3249 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3250 					 true);
3251 	if (err)
3252 		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3253 			       underlay_qpn, err);
3254 
3255 	list_del(&uqp->list);
3256 	mutex_unlock(&root->chain_lock);
3257 	kfree(uqp);
3258 
3259 	return 0;
3260 
3261 out:
3262 	mutex_unlock(&root->chain_lock);
3263 	return err;
3264 }
3265 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3266 
3267 static struct mlx5_flow_root_namespace
3268 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3269 {
3270 	struct mlx5_flow_namespace *ns;
3271 
3272 	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3273 	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3274 		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3275 	else
3276 		ns = mlx5_get_flow_namespace(dev, ns_type);
3277 	if (!ns)
3278 		return NULL;
3279 
3280 	return find_root(&ns->node);
3281 }
3282 
3283 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3284 						 u8 ns_type, u8 num_actions,
3285 						 void *modify_actions)
3286 {
3287 	struct mlx5_flow_root_namespace *root;
3288 	struct mlx5_modify_hdr *modify_hdr;
3289 	int err;
3290 
3291 	root = get_root_namespace(dev, ns_type);
3292 	if (!root)
3293 		return ERR_PTR(-EOPNOTSUPP);
3294 
3295 	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3296 	if (!modify_hdr)
3297 		return ERR_PTR(-ENOMEM);
3298 
3299 	modify_hdr->ns_type = ns_type;
3300 	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3301 					      modify_actions, modify_hdr);
3302 	if (err) {
3303 		kfree(modify_hdr);
3304 		return ERR_PTR(err);
3305 	}
3306 
3307 	return modify_hdr;
3308 }
3309 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3310 
3311 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3312 				struct mlx5_modify_hdr *modify_hdr)
3313 {
3314 	struct mlx5_flow_root_namespace *root;
3315 
3316 	root = get_root_namespace(dev, modify_hdr->ns_type);
3317 	if (WARN_ON(!root))
3318 		return;
3319 	root->cmds->modify_header_dealloc(root, modify_hdr);
3320 	kfree(modify_hdr);
3321 }
3322 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3323 
3324 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3325 						     struct mlx5_pkt_reformat_params *params,
3326 						     enum mlx5_flow_namespace_type ns_type)
3327 {
3328 	struct mlx5_pkt_reformat *pkt_reformat;
3329 	struct mlx5_flow_root_namespace *root;
3330 	int err;
3331 
3332 	root = get_root_namespace(dev, ns_type);
3333 	if (!root)
3334 		return ERR_PTR(-EOPNOTSUPP);
3335 
3336 	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3337 	if (!pkt_reformat)
3338 		return ERR_PTR(-ENOMEM);
3339 
3340 	pkt_reformat->ns_type = ns_type;
3341 	pkt_reformat->reformat_type = params->type;
3342 	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3343 						pkt_reformat);
3344 	if (err) {
3345 		kfree(pkt_reformat);
3346 		return ERR_PTR(err);
3347 	}
3348 
3349 	return pkt_reformat;
3350 }
3351 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3352 
3353 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3354 				  struct mlx5_pkt_reformat *pkt_reformat)
3355 {
3356 	struct mlx5_flow_root_namespace *root;
3357 
3358 	root = get_root_namespace(dev, pkt_reformat->ns_type);
3359 	if (WARN_ON(!root))
3360 		return;
3361 	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3362 	kfree(pkt_reformat);
3363 }
3364 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3365 
3366 int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
3367 {
3368 	return definer->id;
3369 }
3370 
3371 struct mlx5_flow_definer *
3372 mlx5_create_match_definer(struct mlx5_core_dev *dev,
3373 			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
3374 			  u32 *match_mask)
3375 {
3376 	struct mlx5_flow_root_namespace *root;
3377 	struct mlx5_flow_definer *definer;
3378 	int id;
3379 
3380 	root = get_root_namespace(dev, ns_type);
3381 	if (!root)
3382 		return ERR_PTR(-EOPNOTSUPP);
3383 
3384 	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3385 	if (!definer)
3386 		return ERR_PTR(-ENOMEM);
3387 
3388 	definer->ns_type = ns_type;
3389 	id = root->cmds->create_match_definer(root, format_id, match_mask);
3390 	if (id < 0) {
3391 		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3392 		kfree(definer);
3393 		return ERR_PTR(id);
3394 	}
3395 	definer->id = id;
3396 	return definer;
3397 }
3398 
3399 void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3400 				struct mlx5_flow_definer *definer)
3401 {
3402 	struct mlx5_flow_root_namespace *root;
3403 
3404 	root = get_root_namespace(dev, definer->ns_type);
3405 	if (WARN_ON(!root))
3406 		return;
3407 
3408 	root->cmds->destroy_match_definer(root, definer->id);
3409 	kfree(definer);
3410 }
3411 
3412 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3413 				 struct mlx5_flow_root_namespace *peer_ns)
3414 {
3415 	if (peer_ns && ns->mode != peer_ns->mode) {
3416 		mlx5_core_err(ns->dev,
3417 			      "Can't peer namespace of different steering mode\n");
3418 		return -EINVAL;
3419 	}
3420 
3421 	return ns->cmds->set_peer(ns, peer_ns);
3422 }
3423 
3424 /* This function should be called only at init stage of the namespace.
3425  * It is not safe to call this function while steering operations
3426  * are executed in the namespace.
3427  */
3428 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3429 				 enum mlx5_flow_steering_mode mode)
3430 {
3431 	struct mlx5_flow_root_namespace *root;
3432 	const struct mlx5_flow_cmds *cmds;
3433 	int err;
3434 
3435 	root = find_root(&ns->node);
3436 	if (&root->ns != ns)
3437 	/* Can't set cmds to non root namespace */
3438 		return -EINVAL;
3439 
3440 	if (root->table_type != FS_FT_FDB)
3441 		return -EOPNOTSUPP;
3442 
3443 	if (root->mode == mode)
3444 		return 0;
3445 
3446 	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3447 		cmds = mlx5_fs_cmd_get_dr_cmds();
3448 	else
3449 		cmds = mlx5_fs_cmd_get_fw_cmds();
3450 	if (!cmds)
3451 		return -EOPNOTSUPP;
3452 
3453 	err = cmds->create_ns(root);
3454 	if (err) {
3455 		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3456 			      err);
3457 		return err;
3458 	}
3459 
3460 	root->cmds->destroy_ns(root);
3461 	root->cmds = cmds;
3462 	root->mode = mode;
3463 
3464 	return 0;
3465 }
3466