1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37 
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
43 #include "accel/ipsec.h"
44 #include "fpga/ipsec.h"
45 
46 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 					 sizeof(struct init_tree_node))
48 
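/* Helpers for building the static init_tree_node trees below: ADD_PRIO
 * describes a priority node (optionally gated on device caps and a minimal
 * flow table level), ADD_NS a namespace node with a default miss action,
 * and ADD_MULTIPLE_PRIO a run of leaf priorities sharing the same number of
 * levels.
 */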
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 		 ...) {.type = FS_TYPE_PRIO,\
51 	.min_ft_level = min_level_val,\
52 	.num_levels = num_levels_val,\
53 	.num_leaf_prios = num_prios_val,\
54 	.caps = caps_val,\
55 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
56 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57 }
58 
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
61 		 __VA_ARGS__)\
62 
63 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
64 	.def_miss_action = def_miss_act,\
65 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
66 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67 }
68 
69 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 				   sizeof(long))
71 
72 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73 
74 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 			       .caps = (long[]) {__VA_ARGS__} }
76 
77 #define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81 
82 #define FS_CHAINING_CAPS_EGRESS                                                \
83 	FS_REQUIRED_CAPS(                                                      \
84 		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
85 		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
86 		FS_CAP(flow_table_properties_nic_transmit                      \
87 			       .identified_miss_table_mode),                   \
88 		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89 
90 #define FS_CHAINING_CAPS_RDMA_TX                                                \
91 	FS_REQUIRED_CAPS(                                                       \
92 		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
94 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
95 			       .identified_miss_table_mode),                    \
96 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
97 			       .flow_table_modify))
98 
99 #define LEFTOVERS_NUM_LEVELS 1
100 #define LEFTOVERS_NUM_PRIOS 1
101 
102 #define BY_PASS_PRIO_NUM_LEVELS 1
103 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
104 			   LEFTOVERS_NUM_PRIOS)
105 
106 #define ETHTOOL_PRIO_NUM_LEVELS 1
107 #define ETHTOOL_NUM_PRIOS 11
108 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, VLAN, MAC, TTC, inner TTC, {UDP/ANY/aRFS/accel/{esp, esp_err}} */

110 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
111 #define KERNEL_NIC_NUM_PRIOS 1
112 /* One more level for tc */
113 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
114 
115 #define KERNEL_NIC_TC_NUM_PRIOS  1
116 #define KERNEL_NIC_TC_NUM_LEVELS 2
117 
118 #define ANCHOR_NUM_LEVELS 1
119 #define ANCHOR_NUM_PRIOS 1
120 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
121 
122 #define OFFLOADS_MAX_FT 2
123 #define OFFLOADS_NUM_PRIOS 2
124 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
125 
126 #define LAG_PRIO_NUM_LEVELS 1
127 #define LAG_NUM_PRIOS 1
128 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
129 
130 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
131 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
132 #define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
133 
134 struct node_caps {
135 	size_t	arr_sz;
136 	long	*caps;
137 };
138 
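/* Static layout of the NIC RX steering tree: seven chained priorities
 * (bypass, lag, offloads, ethtool, kernel, leftovers, anchor), each holding
 * a namespace with its own leaf priorities and level budget.
 */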
139 static struct init_tree_node {
140 	enum fs_node_type	type;
141 	struct init_tree_node *children;
142 	int ar_size;
143 	struct node_caps caps;
144 	int min_ft_level;
145 	int num_leaf_prios;
146 	int prio;
147 	int num_levels;
148 	enum mlx5_flow_table_miss_action def_miss_action;
149 } root_fs = {
150 	.type = FS_TYPE_NAMESPACE,
151 	.ar_size = 7,
152 	  .children = (struct init_tree_node[]){
153 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
154 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
155 				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
156 						    BY_PASS_PRIO_NUM_LEVELS))),
157 		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
158 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
159 				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
160 						    LAG_PRIO_NUM_LEVELS))),
161 		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
162 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
163 				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
164 						    OFFLOADS_MAX_FT))),
165 		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
166 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
167 				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
168 						    ETHTOOL_PRIO_NUM_LEVELS))),
169 		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
170 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
171 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
172 						    KERNEL_NIC_TC_NUM_LEVELS),
173 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
174 						    KERNEL_NIC_PRIO_NUM_LEVELS))),
175 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
176 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
177 				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
178 						    LEFTOVERS_NUM_LEVELS))),
179 		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
180 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
181 				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
182 						    ANCHOR_NUM_LEVELS))),
183 	}
184 };
185 
186 static struct init_tree_node egress_root_fs = {
187 	.type = FS_TYPE_NAMESPACE,
188 #ifdef CONFIG_MLX5_IPSEC
189 	.ar_size = 2,
190 #else
191 	.ar_size = 1,
192 #endif
193 	.children = (struct init_tree_node[]) {
194 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
195 			 FS_CHAINING_CAPS_EGRESS,
196 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
197 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
198 						  BY_PASS_PRIO_NUM_LEVELS))),
199 #ifdef CONFIG_MLX5_IPSEC
200 		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
201 			 FS_CHAINING_CAPS_EGRESS,
202 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
203 				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
204 						  KERNEL_TX_IPSEC_NUM_LEVELS))),
205 #endif
206 	}
207 };
208 
209 #define RDMA_RX_BYPASS_PRIO 0
210 #define RDMA_RX_KERNEL_PRIO 1
211 static struct init_tree_node rdma_rx_root_fs = {
212 	.type = FS_TYPE_NAMESPACE,
213 	.ar_size = 2,
214 	.children = (struct init_tree_node[]) {
215 		[RDMA_RX_BYPASS_PRIO] =
216 		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
217 			 FS_CHAINING_CAPS,
218 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
219 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
220 						  BY_PASS_PRIO_NUM_LEVELS))),
221 		[RDMA_RX_KERNEL_PRIO] =
222 		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
223 			 FS_CHAINING_CAPS,
224 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
225 				ADD_MULTIPLE_PRIO(1, 1))),
226 	}
227 };
228 
229 static struct init_tree_node rdma_tx_root_fs = {
230 	.type = FS_TYPE_NAMESPACE,
231 	.ar_size = 1,
232 	.children = (struct init_tree_node[]) {
233 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
234 			 FS_CHAINING_CAPS_RDMA_TX,
235 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
236 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
237 						  BY_PASS_PRIO_NUM_LEVELS))),
238 	}
239 };
240 
241 enum fs_i_lock_class {
242 	FS_LOCK_GRANDPARENT,
243 	FS_LOCK_PARENT,
244 	FS_LOCK_CHILD
245 };
246 
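/* FTEs are hashed by their match value within a flow group, and flow groups
 * are hashed by their match mask within a flow table.
 */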
247 static const struct rhashtable_params rhash_fte = {
248 	.key_len = sizeof_field(struct fs_fte, val),
249 	.key_offset = offsetof(struct fs_fte, val),
250 	.head_offset = offsetof(struct fs_fte, hash),
251 	.automatic_shrinking = true,
252 	.min_size = 1,
253 };
254 
255 static const struct rhashtable_params rhash_fg = {
256 	.key_len = sizeof_field(struct mlx5_flow_group, mask),
257 	.key_offset = offsetof(struct mlx5_flow_group, mask),
258 	.head_offset = offsetof(struct mlx5_flow_group, hash),
259 	.automatic_shrinking = true,
260 	.min_size = 1,
261 
262 };
263 
264 static void del_hw_flow_table(struct fs_node *node);
265 static void del_hw_flow_group(struct fs_node *node);
266 static void del_hw_fte(struct fs_node *node);
267 static void del_sw_flow_table(struct fs_node *node);
268 static void del_sw_flow_group(struct fs_node *node);
269 static void del_sw_fte(struct fs_node *node);
270 static void del_sw_prio(struct fs_node *node);
271 static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that
 * requires locking the FTE for the whole deletion process.
 */
275 static void del_sw_hw_rule(struct fs_node *node);
276 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
277 				struct mlx5_flow_destination *d2);
278 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
279 static struct mlx5_flow_rule *
280 find_flow_rule(struct fs_fte *fte,
281 	       struct mlx5_flow_destination *dest);
282 
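/* Flow steering objects (namespaces, prios, tables, groups, FTEs, rules) are
 * kept in a refcounted tree of fs_nodes. Each node carries optional del_hw
 * and del_sw callbacks: when the last reference is dropped, tree_put_node()
 * destroys the HW object first and then frees the SW representation under
 * the parent's lock.
 */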
283 static void tree_init_node(struct fs_node *node,
284 			   void (*del_hw_func)(struct fs_node *),
285 			   void (*del_sw_func)(struct fs_node *))
286 {
287 	refcount_set(&node->refcount, 1);
288 	INIT_LIST_HEAD(&node->list);
289 	INIT_LIST_HEAD(&node->children);
290 	init_rwsem(&node->lock);
291 	node->del_hw_func = del_hw_func;
292 	node->del_sw_func = del_sw_func;
293 	node->active = false;
294 }
295 
296 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
297 {
298 	if (parent)
299 		refcount_inc(&parent->refcount);
300 	node->parent = parent;
301 
	/* A node without a parent is itself the root */
303 	if (!parent)
304 		node->root = node;
305 	else
306 		node->root = parent->root;
307 }
308 
309 static int tree_get_node(struct fs_node *node)
310 {
311 	return refcount_inc_not_zero(&node->refcount);
312 }
313 
314 static void nested_down_read_ref_node(struct fs_node *node,
315 				      enum fs_i_lock_class class)
316 {
317 	if (node) {
318 		down_read_nested(&node->lock, class);
319 		refcount_inc(&node->refcount);
320 	}
321 }
322 
323 static void nested_down_write_ref_node(struct fs_node *node,
324 				       enum fs_i_lock_class class)
325 {
326 	if (node) {
327 		down_write_nested(&node->lock, class);
328 		refcount_inc(&node->refcount);
329 	}
330 }
331 
332 static void down_write_ref_node(struct fs_node *node, bool locked)
333 {
334 	if (node) {
335 		if (!locked)
336 			down_write(&node->lock);
337 		refcount_inc(&node->refcount);
338 	}
339 }
340 
341 static void up_read_ref_node(struct fs_node *node)
342 {
343 	refcount_dec(&node->refcount);
344 	up_read(&node->lock);
345 }
346 
347 static void up_write_ref_node(struct fs_node *node, bool locked)
348 {
349 	refcount_dec(&node->refcount);
350 	if (!locked)
351 		up_write(&node->lock);
352 }
353 
354 static void tree_put_node(struct fs_node *node, bool locked)
355 {
356 	struct fs_node *parent_node = node->parent;
357 
358 	if (refcount_dec_and_test(&node->refcount)) {
359 		if (node->del_hw_func)
360 			node->del_hw_func(node);
361 		if (parent_node) {
362 			down_write_ref_node(parent_node, locked);
363 			list_del_init(&node->list);
364 		}
365 		node->del_sw_func(node);
366 		if (parent_node)
367 			up_write_ref_node(parent_node, locked);
368 		node = NULL;
369 	}
370 	if (!node && parent_node)
371 		tree_put_node(parent_node, locked);
372 }
373 
374 static int tree_remove_node(struct fs_node *node, bool locked)
375 {
376 	if (refcount_read(&node->refcount) > 1) {
377 		refcount_dec(&node->refcount);
378 		return -EEXIST;
379 	}
380 	tree_put_node(node, locked);
381 	return 0;
382 }
383 
384 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
385 				 unsigned int prio)
386 {
387 	struct fs_prio *iter_prio;
388 
389 	fs_for_each_prio(iter_prio, ns) {
390 		if (iter_prio->prio == prio)
391 			return iter_prio;
392 	}
393 
394 	return NULL;
395 }
396 
397 static bool is_fwd_next_action(u32 action)
398 {
399 	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
400 			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
401 }
402 
403 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
404 {
405 	int i;
406 
407 	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
408 		if (spec->match_value[i] & ~spec->match_criteria[i]) {
409 			pr_warn("mlx5_core: match_value differs from match_criteria\n");
410 			return false;
411 		}
412 
413 	return true;
414 }
415 
416 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
417 {
418 	struct fs_node *root;
419 	struct mlx5_flow_namespace *ns;
420 
421 	root = node->root;
422 
423 	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or is corrupted\n");
425 		return NULL;
426 	}
427 
428 	ns = container_of(root, struct mlx5_flow_namespace, node);
429 	return container_of(ns, struct mlx5_flow_root_namespace, ns);
430 }
431 
432 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
433 {
434 	struct mlx5_flow_root_namespace *root = find_root(node);
435 
436 	if (root)
437 		return root->dev->priv.steering;
438 	return NULL;
439 }
440 
441 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
442 {
443 	struct mlx5_flow_root_namespace *root = find_root(node);
444 
445 	if (root)
446 		return root->dev;
447 	return NULL;
448 }
449 
450 static void del_sw_ns(struct fs_node *node)
451 {
452 	kfree(node);
453 }
454 
455 static void del_sw_prio(struct fs_node *node)
456 {
457 	kfree(node);
458 }
459 
460 static void del_hw_flow_table(struct fs_node *node)
461 {
462 	struct mlx5_flow_root_namespace *root;
463 	struct mlx5_flow_table *ft;
464 	struct mlx5_core_dev *dev;
465 	int err;
466 
467 	fs_get_obj(ft, node);
468 	dev = get_dev(&ft->node);
469 	root = find_root(&ft->node);
470 	trace_mlx5_fs_del_ft(ft);
471 
472 	if (node->active) {
473 		err = root->cmds->destroy_flow_table(root, ft);
474 		if (err)
475 			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
476 	}
477 }
478 
479 static void del_sw_flow_table(struct fs_node *node)
480 {
481 	struct mlx5_flow_table *ft;
482 	struct fs_prio *prio;
483 
484 	fs_get_obj(ft, node);
485 
486 	rhltable_destroy(&ft->fgs_hash);
487 	if (ft->node.parent) {
488 		fs_get_obj(prio, ft->node.parent);
489 		prio->num_ft--;
490 	}
491 	kfree(ft);
492 }
493 
494 static void modify_fte(struct fs_fte *fte)
495 {
496 	struct mlx5_flow_root_namespace *root;
497 	struct mlx5_flow_table *ft;
498 	struct mlx5_flow_group *fg;
499 	struct mlx5_core_dev *dev;
500 	int err;
501 
502 	fs_get_obj(fg, fte->node.parent);
503 	fs_get_obj(ft, fg->node.parent);
504 	dev = get_dev(&fte->node);
505 
506 	root = find_root(&ft->node);
507 	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
508 	if (err)
509 		mlx5_core_warn(dev,
510 			       "%s can't del rule fg id=%d fte_index=%d\n",
511 			       __func__, fg->id, fte->index);
512 	fte->modify_mask = 0;
513 }
514 
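/* Remove one destination (rule) from its FTE: unlink forward-to-next-prio
 * rules from the next table's fwd_rules list and, while other destinations
 * remain, clear actions that no longer apply and record the required update
 * bits in fte->modify_mask so the FTE can be re-synced to HW later.
 */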
515 static void del_sw_hw_rule(struct fs_node *node)
516 {
517 	struct mlx5_flow_rule *rule;
518 	struct fs_fte *fte;
519 
520 	fs_get_obj(rule, node);
521 	fs_get_obj(fte, rule->node.parent);
522 	trace_mlx5_fs_del_rule(rule);
523 	if (is_fwd_next_action(rule->sw_action)) {
524 		mutex_lock(&rule->dest_attr.ft->lock);
525 		list_del(&rule->next_ft);
526 		mutex_unlock(&rule->dest_attr.ft->lock);
527 	}
528 
529 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
530 	    --fte->dests_size) {
531 		fte->modify_mask |=
532 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
533 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
534 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
535 		goto out;
536 	}
537 
538 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
539 	    --fte->dests_size) {
540 		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
541 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
542 		goto out;
543 	}
544 
545 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
546 	    --fte->dests_size) {
547 		fte->modify_mask |=
548 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
549 	}
550 out:
551 	kfree(rule);
552 }
553 
554 static void del_hw_fte(struct fs_node *node)
555 {
556 	struct mlx5_flow_root_namespace *root;
557 	struct mlx5_flow_table *ft;
558 	struct mlx5_flow_group *fg;
559 	struct mlx5_core_dev *dev;
560 	struct fs_fte *fte;
561 	int err;
562 
563 	fs_get_obj(fte, node);
564 	fs_get_obj(fg, fte->node.parent);
565 	fs_get_obj(ft, fg->node.parent);
566 
567 	trace_mlx5_fs_del_fte(fte);
568 	dev = get_dev(&ft->node);
569 	root = find_root(&ft->node);
570 	if (node->active) {
571 		err = root->cmds->delete_fte(root, ft, fte);
572 		if (err)
573 			mlx5_core_warn(dev,
574 				       "flow steering can't delete fte in index %d of flow group id %d\n",
575 				       fte->index, fg->id);
576 		node->active = false;
577 	}
578 }
579 
580 static void del_sw_fte(struct fs_node *node)
581 {
582 	struct mlx5_flow_steering *steering = get_steering(node);
583 	struct mlx5_flow_group *fg;
584 	struct fs_fte *fte;
585 	int err;
586 
587 	fs_get_obj(fte, node);
588 	fs_get_obj(fg, fte->node.parent);
589 
590 	err = rhashtable_remove_fast(&fg->ftes_hash,
591 				     &fte->hash,
592 				     rhash_fte);
593 	WARN_ON(err);
594 	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
595 	kmem_cache_free(steering->ftes_cache, fte);
596 }
597 
598 static void del_hw_flow_group(struct fs_node *node)
599 {
600 	struct mlx5_flow_root_namespace *root;
601 	struct mlx5_flow_group *fg;
602 	struct mlx5_flow_table *ft;
603 	struct mlx5_core_dev *dev;
604 
605 	fs_get_obj(fg, node);
606 	fs_get_obj(ft, fg->node.parent);
607 	dev = get_dev(&ft->node);
608 	trace_mlx5_fs_del_fg(fg);
609 
610 	root = find_root(&ft->node);
611 	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
612 		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
613 			       fg->id, ft->id);
614 }
615 
616 static void del_sw_flow_group(struct fs_node *node)
617 {
618 	struct mlx5_flow_steering *steering = get_steering(node);
619 	struct mlx5_flow_group *fg;
620 	struct mlx5_flow_table *ft;
621 	int err;
622 
623 	fs_get_obj(fg, node);
624 	fs_get_obj(ft, fg->node.parent);
625 
626 	rhashtable_destroy(&fg->ftes_hash);
627 	ida_destroy(&fg->fte_allocator);
628 	if (ft->autogroup.active &&
629 	    fg->max_ftes == ft->autogroup.group_size &&
630 	    fg->start_index < ft->autogroup.max_fte)
631 		ft->autogroup.num_groups--;
632 	err = rhltable_remove(&ft->fgs_hash,
633 			      &fg->hash,
634 			      rhash_fg);
635 	WARN_ON(err);
636 	kmem_cache_free(steering->fgs_cache, fg);
637 }
638 
639 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
640 {
641 	int index;
642 	int ret;
643 
644 	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
645 	if (index < 0)
646 		return index;
647 
648 	fte->index = index + fg->start_index;
649 	ret = rhashtable_insert_fast(&fg->ftes_hash,
650 				     &fte->hash,
651 				     rhash_fte);
652 	if (ret)
653 		goto err_ida_remove;
654 
655 	tree_add_node(&fte->node, &fg->node);
656 	list_add_tail(&fte->node.list, &fg->node.children);
657 	return 0;
658 
659 err_ida_remove:
660 	ida_free(&fg->fte_allocator, index);
661 	return ret;
662 }
663 
664 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
665 				const struct mlx5_flow_spec *spec,
666 				struct mlx5_flow_act *flow_act)
667 {
668 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
669 	struct fs_fte *fte;
670 
671 	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
672 	if (!fte)
673 		return ERR_PTR(-ENOMEM);
674 
675 	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
676 	fte->node.type =  FS_TYPE_FLOW_ENTRY;
677 	fte->action = *flow_act;
678 	fte->flow_context = spec->flow_context;
679 
680 	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
681 
682 	return fte;
683 }
684 
685 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
686 			       struct mlx5_flow_group *fg)
687 {
688 	rhashtable_destroy(&fg->ftes_hash);
689 	kmem_cache_free(steering->fgs_cache, fg);
690 }
691 
692 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
693 						u8 match_criteria_enable,
694 						const void *match_criteria,
695 						int start_index,
696 						int end_index)
697 {
698 	struct mlx5_flow_group *fg;
699 	int ret;
700 
701 	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
702 	if (!fg)
703 		return ERR_PTR(-ENOMEM);
704 
705 	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
706 	if (ret) {
707 		kmem_cache_free(steering->fgs_cache, fg);
708 		return ERR_PTR(ret);
709 	}
710 
711 	ida_init(&fg->fte_allocator);
712 	fg->mask.match_criteria_enable = match_criteria_enable;
713 	memcpy(&fg->mask.match_criteria, match_criteria,
714 	       sizeof(fg->mask.match_criteria));
715 	fg->node.type =  FS_TYPE_FLOW_GROUP;
716 	fg->start_index = start_index;
717 	fg->max_ftes = end_index - start_index + 1;
718 
719 	return fg;
720 }
721 
722 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
723 						       u8 match_criteria_enable,
724 						       const void *match_criteria,
725 						       int start_index,
726 						       int end_index,
727 						       struct list_head *prev)
728 {
729 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
730 	struct mlx5_flow_group *fg;
731 	int ret;
732 
733 	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
734 			      start_index, end_index);
735 	if (IS_ERR(fg))
736 		return fg;
737 
738 	/* initialize refcnt, add to parent list */
739 	ret = rhltable_insert(&ft->fgs_hash,
740 			      &fg->hash,
741 			      rhash_fg);
742 	if (ret) {
743 		dealloc_flow_group(steering, fg);
744 		return ERR_PTR(ret);
745 	}
746 
747 	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
748 	tree_add_node(&fg->node, &ft->node);
749 	/* Add node to group list */
750 	list_add(&fg->node.list, prev);
751 	atomic_inc(&ft->node.version);
752 
753 	return fg;
754 }
755 
756 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
757 						enum fs_flow_table_type table_type,
758 						enum fs_flow_table_op_mod op_mod,
759 						u32 flags)
760 {
761 	struct mlx5_flow_table *ft;
762 	int ret;
763 
764 	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
765 	if (!ft)
766 		return ERR_PTR(-ENOMEM);
767 
768 	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
769 	if (ret) {
770 		kfree(ft);
771 		return ERR_PTR(ret);
772 	}
773 
774 	ft->level = level;
775 	ft->node.type = FS_TYPE_FLOW_TABLE;
776 	ft->op_mod = op_mod;
777 	ft->type = table_type;
778 	ft->vport = vport;
779 	ft->flags = flags;
780 	INIT_LIST_HEAD(&ft->fwd_rules);
781 	mutex_init(&ft->lock);
782 
783 	return ft;
784 }
785 
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from the right), else we search for
 * the last flow table in the root sub-tree up to start (closest from the
 * left).
 */
790 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
791 							 struct list_head *start,
792 							 bool reverse)
793 {
794 #define list_advance_entry(pos, reverse)		\
795 	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
796 
797 #define list_for_each_advance_continue(pos, head, reverse)	\
798 	for (pos = list_advance_entry(pos, reverse);		\
799 	     &pos->list != (head);				\
800 	     pos = list_advance_entry(pos, reverse))
801 
802 	struct fs_node *iter = list_entry(start, struct fs_node, list);
803 	struct mlx5_flow_table *ft = NULL;
804 
805 	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
806 		return NULL;
807 
808 	list_for_each_advance_continue(iter, &root->children, reverse) {
809 		if (iter->type == FS_TYPE_FLOW_TABLE) {
810 			fs_get_obj(ft, iter);
811 			return ft;
812 		}
813 		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
814 		if (ft)
815 			return ft;
816 	}
817 
818 	return ft;
819 }
820 
821 /* If reverse is false then return the first flow table in next priority of
822  * prio in the tree, else return the last flow table in the previous priority
823  * of prio in the tree.
824  */
825 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
826 {
827 	struct mlx5_flow_table *ft = NULL;
828 	struct fs_node *curr_node;
829 	struct fs_node *parent;
830 
831 	parent = prio->node.parent;
832 	curr_node = &prio->node;
833 	while (!ft && parent) {
834 		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
835 		curr_node = parent;
836 		parent = curr_node->parent;
837 	}
838 	return ft;
839 }
840 
/* Assuming the whole tree is locked by the chain lock mutex */
842 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
843 {
844 	return find_closest_ft(prio, false);
845 }
846 
/* Assuming the whole tree is locked by the chain lock mutex */
848 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
849 {
850 	return find_closest_ft(prio, true);
851 }
852 
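/* For FWD_NEXT_PRIO/FWD_NEXT_NS rules, resolve the table such a rule should
 * currently forward to: the first table chained after this table's priority,
 * or after its whole namespace when forwarding to the next namespace.
 */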
853 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
854 						struct mlx5_flow_act *flow_act)
855 {
856 	struct fs_prio *prio;
857 	bool next_ns;
858 
859 	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
860 	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
861 
862 	return find_next_chained_ft(prio);
863 }
864 
865 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
866 			       struct fs_prio *prio,
867 			       struct mlx5_flow_table *ft)
868 {
869 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
870 	struct mlx5_flow_table *iter;
871 	int err;
872 
873 	fs_for_each_ft(iter, prio) {
874 		err = root->cmds->modify_flow_table(root, iter, ft);
875 		if (err) {
876 			mlx5_core_err(dev,
877 				      "Failed to modify flow table id %d, type %d, err %d\n",
878 				      iter->id, iter->type, err);
879 			/* The driver is out of sync with the FW */
880 			return err;
881 		}
882 	}
883 	return 0;
884 }
885 
/* Connect flow tables from the previous priority of prio to ft */
887 static int connect_prev_fts(struct mlx5_core_dev *dev,
888 			    struct mlx5_flow_table *ft,
889 			    struct fs_prio *prio)
890 {
891 	struct mlx5_flow_table *prev_ft;
892 
893 	prev_ft = find_prev_chained_ft(prio);
894 	if (prev_ft) {
895 		struct fs_prio *prev_prio;
896 
897 		fs_get_obj(prev_prio, prev_ft->node.parent);
898 		return connect_fts_in_prio(dev, prev_prio, ft);
899 	}
900 	return 0;
901 }
902 
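/* If the new table sits at a lower level than the current root flow table,
 * promote it to be the new root, updating the root for every configured
 * underlay QPN (or once with QPN 0 when the list is empty).
 */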
static int update_root_ft_create(struct mlx5_flow_table *ft,
				 struct fs_prio *prio)
905 {
906 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
907 	struct mlx5_ft_underlay_qp *uqp;
908 	int min_level = INT_MAX;
909 	int err = 0;
910 	u32 qpn;
911 
912 	if (root->root_ft)
913 		min_level = root->root_ft->level;
914 
915 	if (ft->level >= min_level)
916 		return 0;
917 
918 	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (use zero) when the QPN list is empty */
920 		qpn = 0;
921 		err = root->cmds->update_root_ft(root, ft, qpn, false);
922 	} else {
923 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
924 			qpn = uqp->qpn;
925 			err = root->cmds->update_root_ft(root, ft,
926 							 qpn, false);
927 			if (err)
928 				break;
929 		}
930 	}
931 
932 	if (err)
933 		mlx5_core_warn(root->dev,
934 			       "Update root flow table of id(%u) qpn(%d) failed\n",
935 			       ft->id, qpn);
936 	else
937 		root->root_ft = ft;
938 
939 	return err;
940 }
941 
942 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
943 					 struct mlx5_flow_destination *dest)
944 {
945 	struct mlx5_flow_root_namespace *root;
946 	struct mlx5_flow_table *ft;
947 	struct mlx5_flow_group *fg;
948 	struct fs_fte *fte;
949 	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
950 	int err = 0;
951 
952 	fs_get_obj(fte, rule->node.parent);
953 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
954 		return -EINVAL;
955 	down_write_ref_node(&fte->node, false);
956 	fs_get_obj(fg, fte->node.parent);
957 	fs_get_obj(ft, fg->node.parent);
958 
959 	memcpy(&rule->dest_attr, dest, sizeof(*dest));
960 	root = find_root(&ft->node);
961 	err = root->cmds->update_fte(root, ft, fg,
962 				     modify_mask, fte);
963 	up_write_ref_node(&fte->node, false);
964 
965 	return err;
966 }
967 
968 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
969 				 struct mlx5_flow_destination *new_dest,
970 				 struct mlx5_flow_destination *old_dest)
971 {
972 	int i;
973 
974 	if (!old_dest) {
975 		if (handle->num_rules != 1)
976 			return -EINVAL;
977 		return _mlx5_modify_rule_destination(handle->rule[0],
978 						     new_dest);
979 	}
980 
981 	for (i = 0; i < handle->num_rules; i++) {
982 		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
983 			return _mlx5_modify_rule_destination(handle->rule[i],
984 							     new_dest);
985 	}
986 
987 	return -EINVAL;
988 }
989 
/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
991 static int connect_fwd_rules(struct mlx5_core_dev *dev,
992 			     struct mlx5_flow_table *new_next_ft,
993 			     struct mlx5_flow_table *old_next_ft)
994 {
995 	struct mlx5_flow_destination dest = {};
996 	struct mlx5_flow_rule *iter;
997 	int err = 0;
998 
999 	/* new_next_ft and old_next_ft could be NULL only
1000 	 * when we create/destroy the anchor flow table.
1001 	 */
1002 	if (!new_next_ft || !old_next_ft)
1003 		return 0;
1004 
1005 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1006 	dest.ft = new_next_ft;
1007 
1008 	mutex_lock(&old_next_ft->lock);
1009 	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1010 	mutex_unlock(&old_next_ft->lock);
1011 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1012 		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1013 		    iter->ft->ns == new_next_ft->ns)
1014 			continue;
1015 
1016 		err = _mlx5_modify_rule_destination(iter, &dest);
1017 		if (err)
1018 			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1019 			       new_next_ft->id);
1020 	}
1021 	return 0;
1022 }
1023 
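/* Wire a freshly created table into the steering chain: when it is the first
 * table in its priority, point the previous priority's tables at it and
 * re-point forward rules that targeted the next chained table; then update
 * the root table if needed and the device supports modify_root.
 */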
1024 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1025 			      struct fs_prio *prio)
1026 {
1027 	struct mlx5_flow_table *next_ft;
1028 	int err = 0;
1029 
1030 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1031 
1032 	if (list_empty(&prio->node.children)) {
1033 		err = connect_prev_fts(dev, ft, prio);
1034 		if (err)
1035 			return err;
1036 
1037 		next_ft = find_next_chained_ft(prio);
1038 		err = connect_fwd_rules(dev, ft, next_ft);
1039 		if (err)
1040 			return err;
1041 	}
1042 
1043 	if (MLX5_CAP_FLOWTABLE(dev,
1044 			       flow_table_properties_nic_receive.modify_root))
1045 		err = update_root_ft_create(ft, prio);
1046 	return err;
1047 }
1048 
1049 static void list_add_flow_table(struct mlx5_flow_table *ft,
1050 				struct fs_prio *prio)
1051 {
1052 	struct list_head *prev = &prio->node.children;
1053 	struct mlx5_flow_table *iter;
1054 
1055 	fs_for_each_ft(iter, prio) {
1056 		if (iter->level > ft->level)
1057 			break;
1058 		prev = &iter->node.list;
1059 	}
1060 	list_add(&ft->node.list, prev);
1061 }
1062 
1063 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1064 							struct mlx5_flow_table_attr *ft_attr,
1065 							enum fs_flow_table_op_mod op_mod,
1066 							u16 vport)
1067 {
1068 	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1069 	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1070 	struct mlx5_flow_table *next_ft;
1071 	struct fs_prio *fs_prio = NULL;
1072 	struct mlx5_flow_table *ft;
1073 	int err;
1074 
1075 	if (!root) {
1076 		pr_err("mlx5: flow steering failed to find root of namespace\n");
1077 		return ERR_PTR(-ENODEV);
1078 	}
1079 
1080 	mutex_lock(&root->chain_lock);
1081 	fs_prio = find_prio(ns, ft_attr->prio);
1082 	if (!fs_prio) {
1083 		err = -EINVAL;
1084 		goto unlock_root;
1085 	}
1086 	if (!unmanaged) {
1087 		/* The level is related to the
1088 		 * priority level range.
1089 		 */
1090 		if (ft_attr->level >= fs_prio->num_levels) {
1091 			err = -ENOSPC;
1092 			goto unlock_root;
1093 		}
1094 
1095 		ft_attr->level += fs_prio->start_level;
1096 	}
1097 
1101 	ft = alloc_flow_table(ft_attr->level,
1102 			      vport,
1103 			      root->table_type,
1104 			      op_mod, ft_attr->flags);
1105 	if (IS_ERR(ft)) {
1106 		err = PTR_ERR(ft);
1107 		goto unlock_root;
1108 	}
1109 
1110 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1111 	next_ft = unmanaged ? ft_attr->next_ft :
1112 			      find_next_chained_ft(fs_prio);
1113 	ft->def_miss_action = ns->def_miss_action;
1114 	ft->ns = ns;
1115 	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
1116 	if (err)
1117 		goto free_ft;
1118 
1119 	if (!unmanaged) {
1120 		err = connect_flow_table(root->dev, ft, fs_prio);
1121 		if (err)
1122 			goto destroy_ft;
1123 	}
1124 
1125 	ft->node.active = true;
1126 	down_write_ref_node(&fs_prio->node, false);
1127 	if (!unmanaged) {
1128 		tree_add_node(&ft->node, &fs_prio->node);
1129 		list_add_flow_table(ft, fs_prio);
1130 	} else {
1131 		ft->node.root = fs_prio->node.root;
1132 	}
1133 	fs_prio->num_ft++;
1134 	up_write_ref_node(&fs_prio->node, false);
1135 	mutex_unlock(&root->chain_lock);
1136 	trace_mlx5_fs_add_ft(ft);
1137 	return ft;
1138 destroy_ft:
1139 	root->cmds->destroy_flow_table(root, ft);
1140 free_ft:
1141 	rhltable_destroy(&ft->fgs_hash);
1142 	kfree(ft);
1143 unlock_root:
1144 	mutex_unlock(&root->chain_lock);
1145 	return ERR_PTR(err);
1146 }
1147 
1148 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1149 					       struct mlx5_flow_table_attr *ft_attr)
1150 {
1151 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1152 }
1153 EXPORT_SYMBOL(mlx5_create_flow_table);
1154 
1155 struct mlx5_flow_table *
1156 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1157 			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
1158 {
1159 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1160 }
1161 
1162 struct mlx5_flow_table*
1163 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1164 				 int prio, u32 level)
1165 {
1166 	struct mlx5_flow_table_attr ft_attr = {};
1167 
1168 	ft_attr.level = level;
1169 	ft_attr.prio  = prio;
1170 	ft_attr.max_fte = 1;
1171 
1172 	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1173 }
1174 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1175 
1176 #define MAX_FLOW_GROUP_SIZE BIT(24)
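/* Create a table whose flow groups are carved out on demand: the usable
 * range (max_fte minus the reserved entries) is split into up to
 * max_num_groups + 1 equal chunks, and alloc_auto_flow_group() later hands
 * out one chunk-sized group per new set of match criteria.
 */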
1177 struct mlx5_flow_table*
1178 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1179 				    struct mlx5_flow_table_attr *ft_attr)
1180 {
1181 	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1182 	int max_num_groups = ft_attr->autogroup.max_num_groups;
1183 	struct mlx5_flow_table *ft;
1184 	int autogroups_max_fte;
1185 
1186 	ft = mlx5_create_flow_table(ns, ft_attr);
1187 	if (IS_ERR(ft))
1188 		return ft;
1189 
1190 	autogroups_max_fte = ft->max_fte - num_reserved_entries;
1191 	if (max_num_groups > autogroups_max_fte)
1192 		goto err_validate;
1193 	if (num_reserved_entries > ft->max_fte)
1194 		goto err_validate;
1195 
1196 	/* Align the number of groups according to the largest group size */
1197 	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1198 		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1199 
1200 	ft->autogroup.active = true;
1201 	ft->autogroup.required_groups = max_num_groups;
1202 	ft->autogroup.max_fte = autogroups_max_fte;
	/* Divide by max_num_groups + 1 to leave room for one extra group */
1204 	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1205 
1206 	return ft;
1207 
1208 err_validate:
1209 	mlx5_destroy_flow_table(ft);
1210 	return ERR_PTR(-ENOSPC);
1211 }
1212 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1213 
1214 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1215 					       u32 *fg_in)
1216 {
1217 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1218 	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1219 					    fg_in, match_criteria);
1220 	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1221 					    fg_in,
1222 					    match_criteria_enable);
1223 	int start_index = MLX5_GET(create_flow_group_in, fg_in,
1224 				   start_flow_index);
1225 	int end_index = MLX5_GET(create_flow_group_in, fg_in,
1226 				 end_flow_index);
1227 	struct mlx5_flow_group *fg;
1228 	int err;
1229 
1230 	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1231 		return ERR_PTR(-EPERM);
1232 
1233 	down_write_ref_node(&ft->node, false);
1234 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1235 				     start_index, end_index,
1236 				     ft->node.children.prev);
1237 	up_write_ref_node(&ft->node, false);
1238 	if (IS_ERR(fg))
1239 		return fg;
1240 
1241 	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1242 	if (err) {
1243 		tree_put_node(&fg->node, false);
1244 		return ERR_PTR(err);
1245 	}
1246 	trace_mlx5_fs_add_fg(fg);
1247 	fg->node.active = true;
1248 
1249 	return fg;
1250 }
1251 EXPORT_SYMBOL(mlx5_create_flow_group);
1252 
1253 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1254 {
1255 	struct mlx5_flow_rule *rule;
1256 
1257 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1258 	if (!rule)
1259 		return NULL;
1260 
1261 	INIT_LIST_HEAD(&rule->next_ft);
1262 	rule->node.type = FS_TYPE_FLOW_DEST;
1263 	if (dest)
1264 		memcpy(&rule->dest_attr, dest, sizeof(*dest));
1265 
1266 	return rule;
1267 }
1268 
1269 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1270 {
1271 	struct mlx5_flow_handle *handle;
1272 
1273 	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1274 	if (!handle)
1275 		return NULL;
1276 
1277 	handle->num_rules = num_rules;
1278 
1279 	return handle;
1280 }
1281 
1282 static void destroy_flow_handle(struct fs_fte *fte,
1283 				struct mlx5_flow_handle *handle,
1284 				struct mlx5_flow_destination *dest,
1285 				int i)
1286 {
1287 	for (; --i >= 0;) {
1288 		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1289 			fte->dests_size--;
1290 			list_del(&handle->rule[i]->node.list);
1291 			kfree(handle->rule[i]);
1292 		}
1293 	}
1294 	kfree(handle);
1295 }
1296 
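/* Build a handle with one rule per destination. Destinations already present
 * on the FTE are reused by taking a reference on the existing rule; new ones
 * are allocated, linked under the FTE (flow table destinations at the tail)
 * and reflected in *modify_mask so the FTE can be updated in HW.
 */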
1297 static struct mlx5_flow_handle *
1298 create_flow_handle(struct fs_fte *fte,
1299 		   struct mlx5_flow_destination *dest,
1300 		   int dest_num,
1301 		   int *modify_mask,
1302 		   bool *new_rule)
1303 {
1304 	struct mlx5_flow_handle *handle;
1305 	struct mlx5_flow_rule *rule = NULL;
1306 	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1307 	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1308 	int type;
1309 	int i = 0;
1310 
1311 	handle = alloc_handle((dest_num) ? dest_num : 1);
1312 	if (!handle)
1313 		return ERR_PTR(-ENOMEM);
1314 
1315 	do {
1316 		if (dest) {
1317 			rule = find_flow_rule(fte, dest + i);
1318 			if (rule) {
1319 				refcount_inc(&rule->node.refcount);
1320 				goto rule_found;
1321 			}
1322 		}
1323 
1324 		*new_rule = true;
1325 		rule = alloc_rule(dest + i);
1326 		if (!rule)
1327 			goto free_rules;
1328 
		/* Add dest to the dests list - flow tables must be at the
		 * end of the list for forward-to-next-prio rules.
		 */
1332 		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1333 		if (dest &&
1334 		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1335 			list_add(&rule->node.list, &fte->node.children);
1336 		else
1337 			list_add_tail(&rule->node.list, &fte->node.children);
1338 		if (dest) {
1339 			fte->dests_size++;
1340 
1341 			type = dest[i].type ==
1342 				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1343 			*modify_mask |= type ? count : dst;
1344 		}
1345 rule_found:
1346 		handle->rule[i] = rule;
1347 	} while (++i < dest_num);
1348 
1349 	return handle;
1350 
1351 free_rules:
1352 	destroy_flow_handle(fte, handle, dest, i);
1353 	return ERR_PTR(-ENOMEM);
1354 }
1355 
1356 /* fte should not be deleted while calling this function */
1357 static struct mlx5_flow_handle *
1358 add_rule_fte(struct fs_fte *fte,
1359 	     struct mlx5_flow_group *fg,
1360 	     struct mlx5_flow_destination *dest,
1361 	     int dest_num,
1362 	     bool update_action)
1363 {
1364 	struct mlx5_flow_root_namespace *root;
1365 	struct mlx5_flow_handle *handle;
1366 	struct mlx5_flow_table *ft;
1367 	int modify_mask = 0;
1368 	int err;
1369 	bool new_rule = false;
1370 
1371 	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1372 				    &new_rule);
1373 	if (IS_ERR(handle) || !new_rule)
1374 		goto out;
1375 
1376 	if (update_action)
1377 		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1378 
1379 	fs_get_obj(ft, fg->node.parent);
1380 	root = find_root(&fg->node);
1381 	if (!(fte->status & FS_FTE_STATUS_EXISTING))
1382 		err = root->cmds->create_fte(root, ft, fg, fte);
1383 	else
1384 		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1385 	if (err)
1386 		goto free_handle;
1387 
1388 	fte->node.active = true;
1389 	fte->status |= FS_FTE_STATUS_EXISTING;
1390 	atomic_inc(&fg->node.version);
1391 
1392 out:
1393 	return handle;
1394 
1395 free_handle:
1396 	destroy_flow_handle(fte, handle, dest, handle->num_rules);
1397 	return ERR_PTR(err);
1398 }
1399 
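/* Find a hole of group_size entries in an autogrouped table by walking the
 * existing flow groups (kept sorted by start_index), and insert a new group
 * there using the spec's match criteria.
 */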
1400 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
1401 						     const struct mlx5_flow_spec *spec)
1402 {
1403 	struct list_head *prev = &ft->node.children;
1404 	u32 max_fte = ft->autogroup.max_fte;
1405 	unsigned int candidate_index = 0;
1406 	unsigned int group_size = 0;
1407 	struct mlx5_flow_group *fg;
1408 
1409 	if (!ft->autogroup.active)
1410 		return ERR_PTR(-ENOENT);
1411 
1412 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1413 		group_size = ft->autogroup.group_size;
1414 
	/* Fall back to single-entry groups when group_size is still 0 */
	if (group_size == 0)
		group_size = 1;
1418 
1419 	/* sorted by start_index */
1420 	fs_for_each_fg(fg, ft) {
1421 		if (candidate_index + group_size > fg->start_index)
1422 			candidate_index = fg->start_index + fg->max_ftes;
1423 		else
1424 			break;
1425 		prev = &fg->node.list;
1426 	}
1427 
1428 	if (candidate_index + group_size > max_fte)
1429 		return ERR_PTR(-ENOSPC);
1430 
1431 	fg = alloc_insert_flow_group(ft,
1432 				     spec->match_criteria_enable,
1433 				     spec->match_criteria,
1434 				     candidate_index,
1435 				     candidate_index + group_size - 1,
1436 				     prev);
1437 	if (IS_ERR(fg))
1438 		goto out;
1439 
1440 	if (group_size == ft->autogroup.group_size)
1441 		ft->autogroup.num_groups++;
1442 
1443 out:
1444 	return fg;
1445 }
1446 
1447 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1448 				  struct mlx5_flow_group *fg)
1449 {
1450 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1451 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1452 	void *match_criteria_addr;
1453 	u8 src_esw_owner_mask_on;
1454 	void *misc;
1455 	int err;
1456 	u32 *in;
1457 
1458 	in = kvzalloc(inlen, GFP_KERNEL);
1459 	if (!in)
1460 		return -ENOMEM;
1461 
1462 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1463 		 fg->mask.match_criteria_enable);
1464 	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1465 	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
1466 		 fg->max_ftes - 1);
1467 
1468 	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1469 			    misc_parameters);
1470 	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1471 					 source_eswitch_owner_vhca_id);
1472 	MLX5_SET(create_flow_group_in, in,
1473 		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1474 
1475 	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1476 					   in, match_criteria);
1477 	memcpy(match_criteria_addr, fg->mask.match_criteria,
1478 	       sizeof(fg->mask.match_criteria));
1479 
1480 	err = root->cmds->create_flow_group(root, ft, in, fg);
1481 	if (!err) {
1482 		fg->node.active = true;
1483 		trace_mlx5_fs_add_fg(fg);
1484 	}
1485 
1486 	kvfree(in);
1487 	return err;
1488 }
1489 
1490 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1491 				struct mlx5_flow_destination *d2)
1492 {
1493 	if (d1->type == d2->type) {
1494 		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1495 		     d1->vport.num == d2->vport.num &&
1496 		     d1->vport.flags == d2->vport.flags &&
1497 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1498 		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1499 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1500 		      (d1->vport.pkt_reformat->id ==
1501 		       d2->vport.pkt_reformat->id) : true)) ||
1502 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1503 		     d1->ft == d2->ft) ||
1504 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1505 		     d1->tir_num == d2->tir_num) ||
1506 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1507 		     d1->ft_num == d2->ft_num) ||
1508 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1509 		     d1->sampler_id == d2->sampler_id))
1510 			return true;
1511 	}
1512 
1513 	return false;
1514 }
1515 
1516 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1517 					     struct mlx5_flow_destination *dest)
1518 {
1519 	struct mlx5_flow_rule *rule;
1520 
1521 	list_for_each_entry(rule, &fte->node.children, node.list) {
1522 		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1523 			return rule;
1524 	}
1525 	return NULL;
1526 }
1527 
1528 static bool check_conflicting_actions(u32 action1, u32 action2)
1529 {
1530 	u32 xored_actions = action1 ^ action2;
1531 
1532 	/* if one rule only wants to count, it's ok */
1533 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1534 	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1535 		return false;
1536 
1537 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
1538 			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1539 			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
1540 			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
1541 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1542 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1543 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1544 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1545 		return true;
1546 
1547 	return false;
1548 }
1549 
1550 static int check_conflicting_ftes(struct fs_fte *fte,
1551 				  const struct mlx5_flow_context *flow_context,
1552 				  const struct mlx5_flow_act *flow_act)
1553 {
1554 	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1555 		mlx5_core_warn(get_dev(&fte->node),
1556 			       "Found two FTEs with conflicting actions\n");
1557 		return -EEXIST;
1558 	}
1559 
1560 	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1561 	    fte->flow_context.flow_tag != flow_context->flow_tag) {
1562 		mlx5_core_warn(get_dev(&fte->node),
1563 			       "FTE flow tag %u already exists with different flow tag %u\n",
1564 			       fte->flow_context.flow_tag,
1565 			       flow_context->flow_tag);
1566 		return -EEXIST;
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1573 					    const struct mlx5_flow_spec *spec,
1574 					    struct mlx5_flow_act *flow_act,
1575 					    struct mlx5_flow_destination *dest,
1576 					    int dest_num,
1577 					    struct fs_fte *fte)
1578 {
1579 	struct mlx5_flow_handle *handle;
1580 	int old_action;
1581 	int i;
1582 	int ret;
1583 
1584 	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1585 	if (ret)
1586 		return ERR_PTR(ret);
1587 
1588 	old_action = fte->action.action;
1589 	fte->action.action |= flow_act->action;
1590 	handle = add_rule_fte(fte, fg, dest, dest_num,
1591 			      old_action != flow_act->action);
1592 	if (IS_ERR(handle)) {
1593 		fte->action.action = old_action;
1594 		return handle;
1595 	}
1596 	trace_mlx5_fs_set_fte(fte, false);
1597 
1598 	for (i = 0; i < handle->num_rules; i++) {
1599 		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1600 			tree_add_node(&handle->rule[i]->node, &fte->node);
1601 			trace_mlx5_fs_add_rule(handle->rule[i]);
1602 		}
1603 	}
1604 	return handle;
1605 }
1606 
1607 static bool counter_is_valid(u32 action)
1608 {
1609 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1610 			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1611 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1612 }
1613 
1614 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1615 			  struct mlx5_flow_act *flow_act,
1616 			  struct mlx5_flow_table *ft)
1617 {
1618 	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1619 	u32 action = flow_act->action;
1620 
1621 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1622 		return counter_is_valid(action);
1623 
1624 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1625 		return true;
1626 
1627 	if (ignore_level) {
1628 		if (ft->type != FS_FT_FDB &&
1629 		    ft->type != FS_FT_NIC_RX)
1630 			return false;
1631 
1632 		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1633 		    ft->type != dest->ft->type)
1634 			return false;
1635 	}
1636 
1637 	if (!dest || ((dest->type ==
1638 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1639 	    (dest->ft->level <= ft->level && !ignore_level)))
1640 		return false;
1641 	return true;
1642 }
1643 
1644 struct match_list {
1645 	struct list_head	list;
1646 	struct mlx5_flow_group *g;
1647 };
1648 
1649 static void free_match_list(struct match_list *head, bool ft_locked)
1650 {
1651 	struct match_list *iter, *match_tmp;
1652 
1653 	list_for_each_entry_safe(iter, match_tmp, &head->list,
1654 				 list) {
1655 		tree_put_node(&iter->g->node, ft_locked);
1656 		list_del(&iter->list);
1657 		kfree(iter);
1658 	}
1659 }
1660 
1661 static int build_match_list(struct match_list *match_head,
1662 			    struct mlx5_flow_table *ft,
1663 			    const struct mlx5_flow_spec *spec,
1664 			    bool ft_locked)
1665 {
1666 	struct rhlist_head *tmp, *list;
1667 	struct mlx5_flow_group *g;
1668 	int err = 0;
1669 
1670 	rcu_read_lock();
1671 	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
1673 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1674 	/* RCU is atomic, we can't execute FW commands here */
1675 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1676 		struct match_list *curr_match;
1677 
1678 		if (unlikely(!tree_get_node(&g->node)))
1679 			continue;
1680 
1681 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1682 		if (!curr_match) {
1683 			free_match_list(match_head, ft_locked);
1684 			err = -ENOMEM;
1685 			goto out;
1686 		}
1687 		curr_match->g = g;
1688 		list_add_tail(&curr_match->list, &match_head->list);
1689 	}
1690 out:
1691 	rcu_read_unlock();
1692 	return err;
1693 }
1694 
1695 static u64 matched_fgs_get_version(struct list_head *match_head)
1696 {
1697 	struct match_list *iter;
1698 	u64 version = 0;
1699 
1700 	list_for_each_entry(iter, match_head, list)
1701 		version += (u64)atomic_read(&iter->g->node.version);
1702 	return version;
1703 }
1704 
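/* Look up an FTE by match value under the group lock (read or write,
 * depending on take_write). On success the FTE is returned referenced and
 * write-locked; inactive FTEs are treated as not found.
 */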
1705 static struct fs_fte *
1706 lookup_fte_locked(struct mlx5_flow_group *g,
1707 		  const u32 *match_value,
1708 		  bool take_write)
1709 {
1710 	struct fs_fte *fte_tmp;
1711 
1712 	if (take_write)
1713 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1714 	else
1715 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1716 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1717 					 rhash_fte);
1718 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1719 		fte_tmp = NULL;
1720 		goto out;
1721 	}
1722 	if (!fte_tmp->node.active) {
1723 		tree_put_node(&fte_tmp->node, false);
1724 		fte_tmp = NULL;
1725 		goto out;
1726 	}
1727 
1728 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1729 out:
1730 	if (take_write)
1731 		up_write_ref_node(&g->node, false);
1732 	else
1733 		up_read_ref_node(&g->node);
1734 	return fte_tmp;
1735 }
1736 
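/* Try to attach the new rule to one of the flow groups in match_head: first
 * look for an existing FTE with the same match value and merge into it,
 * otherwise insert a newly allocated FTE into the first group with free
 * space. Returns -EAGAIN when the table version changed underneath us so the
 * caller can retry under the table write lock.
 */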
1737 static struct mlx5_flow_handle *
1738 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1739 		       struct list_head *match_head,
1740 		       const struct mlx5_flow_spec *spec,
1741 		       struct mlx5_flow_act *flow_act,
1742 		       struct mlx5_flow_destination *dest,
1743 		       int dest_num,
1744 		       int ft_version)
1745 {
1746 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1747 	struct mlx5_flow_group *g;
1748 	struct mlx5_flow_handle *rule;
1749 	struct match_list *iter;
1750 	bool take_write = false;
1751 	struct fs_fte *fte;
1752 	u64  version = 0;
1753 	int err;
1754 
1755 	fte = alloc_fte(ft, spec, flow_act);
1756 	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);
1758 
1759 search_again_locked:
1760 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1761 		goto skip_search;
1762 	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with an identical match value and attempt to
	 * update its action.
	 */
1766 	list_for_each_entry(iter, match_head, list) {
1767 		struct fs_fte *fte_tmp;
1768 
1769 		g = iter->g;
1770 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1771 		if (!fte_tmp)
1772 			continue;
1773 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1774 		/* No error check needed here, because insert_fte() is not called */
1775 		up_write_ref_node(&fte_tmp->node, false);
1776 		tree_put_node(&fte_tmp->node, false);
1777 		kmem_cache_free(steering->ftes_cache, fte);
1778 		return rule;
1779 	}
1780 
1781 skip_search:
1782 	/* No group with matching fte found, or we skipped the search.
1783 	 * Try to add a new fte to any matching fg.
1784 	 */
1785 
	/* Check the ft version, in case a new flow group
	 * was added while the fgs weren't locked.
	 */
1789 	if (atomic_read(&ft->node.version) != ft_version) {
1790 		rule = ERR_PTR(-EAGAIN);
1791 		goto out;
1792 	}
1793 
	/* Check the fgs version. If the version has changed, an FTE with the
	 * same match value may have been added while the fgs weren't locked.
	 */
1798 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1799 	    version != matched_fgs_get_version(match_head)) {
1800 		take_write = true;
1801 		goto search_again_locked;
1802 	}
1803 
1804 	list_for_each_entry(iter, match_head, list) {
1805 		g = iter->g;
1806 
1807 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1808 
1809 		if (!g->node.active) {
1810 			up_write_ref_node(&g->node, false);
1811 			continue;
1812 		}
1813 
1814 		err = insert_fte(g, fte);
1815 		if (err) {
1816 			up_write_ref_node(&g->node, false);
1817 			if (err == -ENOSPC)
1818 				continue;
1819 			kmem_cache_free(steering->ftes_cache, fte);
1820 			return ERR_PTR(err);
1821 		}
1822 
1823 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1824 		up_write_ref_node(&g->node, false);
1825 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1826 		up_write_ref_node(&fte->node, false);
1827 		if (IS_ERR(rule))
1828 			tree_put_node(&fte->node, false);
1829 		return rule;
1830 	}
1831 	rule = ERR_PTR(-ENOENT);
1832 out:
1833 	kmem_cache_free(steering->ftes_cache, fte);
1834 	return rule;
1835 }
1836 
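/* Core add-rule path: collect the flow groups whose match criteria equal
 * the spec, try to add the rule to one of them while holding the table
 * for read, and on -ENOENT/-EAGAIN escalate to the table write lock,
 * retry, and finally create a new autogrouped flow group and FTE.
 */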
1837 static struct mlx5_flow_handle *
1838 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1839 		     const struct mlx5_flow_spec *spec,
1840 		     struct mlx5_flow_act *flow_act,
1841 		     struct mlx5_flow_destination *dest,
1842 		     int dest_num)
1843 
1844 {
1845 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1846 	struct mlx5_flow_handle *rule;
1847 	struct match_list match_head;
1848 	struct mlx5_flow_group *g;
1849 	bool take_write = false;
1850 	struct fs_fte *fte;
1851 	int version;
1852 	int err;
1853 	int i;
1854 
1855 	if (!check_valid_spec(spec))
1856 		return ERR_PTR(-EINVAL);
1857 
1858 	for (i = 0; i < dest_num; i++) {
1859 		if (!dest_is_valid(&dest[i], flow_act, ft))
1860 			return ERR_PTR(-EINVAL);
1861 	}
1862 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1863 search_again_locked:
1864 	version = atomic_read(&ft->node.version);
1865 
1866 	/* Collect all fgs which have a matching match_criteria */
1867 	err = build_match_list(&match_head, ft, spec, take_write);
1868 	if (err) {
1869 		if (take_write)
1870 			up_write_ref_node(&ft->node, false);
1871 		else
1872 			up_read_ref_node(&ft->node);
1873 		return ERR_PTR(err);
1874 	}
1875 
1876 	if (!take_write)
1877 		up_read_ref_node(&ft->node);
1878 
1879 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1880 				      dest_num, version);
1881 	free_match_list(&match_head, take_write);
1882 	if (!IS_ERR(rule) ||
1883 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1884 		if (take_write)
1885 			up_write_ref_node(&ft->node, false);
1886 		return rule;
1887 	}
1888 
1889 	if (!take_write) {
1890 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1891 		take_write = true;
1892 	}
1893 
1894 	if (PTR_ERR(rule) == -EAGAIN ||
1895 	    version != atomic_read(&ft->node.version))
1896 		goto search_again_locked;
1897 
1898 	g = alloc_auto_flow_group(ft, spec);
1899 	if (IS_ERR(g)) {
1900 		rule = ERR_CAST(g);
1901 		up_write_ref_node(&ft->node, false);
1902 		return rule;
1903 	}
1904 
1905 	fte = alloc_fte(ft, spec, flow_act);
1906 	if (IS_ERR(fte)) {
1907 		up_write_ref_node(&ft->node, false);
1908 		err = PTR_ERR(fte);
1909 		goto err_alloc_fte;
1910 	}
1911 
1912 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1913 	up_write_ref_node(&ft->node, false);
1914 
1915 	err = create_auto_flow_group(ft, g);
1916 	if (err)
1917 		goto err_release_fg;
1918 
1919 	err = insert_fte(g, fte);
1920 	if (err)
1921 		goto err_release_fg;
1922 
1923 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1924 	up_write_ref_node(&g->node, false);
1925 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1926 	up_write_ref_node(&fte->node, false);
1927 	if (IS_ERR(rule))
1928 		tree_put_node(&fte->node, false);
1929 	tree_put_node(&g->node, false);
1930 	return rule;
1931 
1932 err_release_fg:
1933 	up_write_ref_node(&g->node, false);
1934 	kmem_cache_free(steering->ftes_cache, fte);
1935 err_alloc_fte:
1936 	tree_put_node(&g->node, false);
1937 	return ERR_PTR(err);
1938 }
1939 
1940 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1941 {
1942 	return ((ft->type == FS_FT_NIC_RX) &&
1943 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1944 }
1945 
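/* Entry point used by the rest of the driver to install steering rules.
 * For FWD_NEXT_PRIO/FWD_NEXT_NS actions the next chained flow table is
 * resolved under the root chain_lock and appended as an extra FWD_DEST
 * destination before calling _mlx5_add_flow_rules(); all other actions
 * are passed through directly.
 *
 * Minimal caller-side sketch (illustrative only; spec setup, flow table
 * creation and error handling are omitted, and the TIR destination with
 * the placeholder tirn is just an example):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_handle *handle;
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 */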
1946 struct mlx5_flow_handle *
1947 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1948 		    const struct mlx5_flow_spec *spec,
1949 		    struct mlx5_flow_act *flow_act,
1950 		    struct mlx5_flow_destination *dest,
1951 		    int num_dest)
1952 {
1953 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1954 	static const struct mlx5_flow_spec zero_spec = {};
1955 	struct mlx5_flow_destination *gen_dest = NULL;
1956 	struct mlx5_flow_table *next_ft = NULL;
1957 	struct mlx5_flow_handle *handle = NULL;
1958 	u32 sw_action = flow_act->action;
1959 	int i;
1960 
1961 	if (!spec)
1962 		spec = &zero_spec;
1963 
1964 	if (!is_fwd_next_action(sw_action))
1965 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1966 
1967 	if (!fwd_next_prio_supported(ft))
1968 		return ERR_PTR(-EOPNOTSUPP);
1969 
1970 	mutex_lock(&root->chain_lock);
1971 	next_ft = find_next_fwd_ft(ft, flow_act);
1972 	if (!next_ft) {
1973 		handle = ERR_PTR(-EOPNOTSUPP);
1974 		goto unlock;
1975 	}
1976 
1977 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
1978 			   GFP_KERNEL);
1979 	if (!gen_dest) {
1980 		handle = ERR_PTR(-ENOMEM);
1981 		goto unlock;
1982 	}
1983 	for (i = 0; i < num_dest; i++)
1984 		gen_dest[i] = dest[i];
1985 	gen_dest[i].type =
1986 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1987 	gen_dest[i].ft = next_ft;
1988 	dest = gen_dest;
1989 	num_dest++;
1990 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
1991 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
1992 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1993 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1994 	if (IS_ERR(handle))
1995 		goto unlock;
1996 
1997 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
1998 		mutex_lock(&next_ft->lock);
1999 		list_add(&handle->rule[num_dest - 1]->next_ft,
2000 			 &next_ft->fwd_rules);
2001 		mutex_unlock(&next_ft->lock);
2002 		handle->rule[num_dest - 1]->sw_action = sw_action;
2003 		handle->rule[num_dest - 1]->ft = ft;
2004 	}
2005 unlock:
2006 	mutex_unlock(&root->chain_lock);
2007 	kfree(gen_dest);
2008 	return handle;
2009 }
2010 EXPORT_SYMBOL(mlx5_add_flow_rules);
2011 
2012 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2013 {
2014 	struct fs_fte *fte;
2015 	int i;
2016 
2017 	/* In order to consolidate the HW changes we lock the FTE against
2018 	 * other changes and take a reference on it, so that the FTE's "del"
2019 	 * functions are not invoked; they are handled here instead.
2020 	 * The removal of the rules is done under the locked FTE.
2021 	 * After removing all of the handle's rules, if other rules remain,
2022 	 * it means we only need to modify the FTE in FW, then unlock it and
2023 	 * drop the reference we took.
2024 	 * Otherwise the FTE should be deleted: first delete the FTE in FW,
2025 	 * then unlock the FTE and call tree_put_node() on it, which performs
2026 	 * the final reference drop as well as the required handling of its
2027 	 * parent.
2028 	 */
2029 	fs_get_obj(fte, handle->rule[0]->node.parent);
2030 	down_write_ref_node(&fte->node, false);
2031 	for (i = handle->num_rules - 1; i >= 0; i--)
2032 		tree_remove_node(&handle->rule[i]->node, true);
2033 	if (fte->dests_size) {
2034 		if (fte->modify_mask)
2035 			modify_fte(fte);
2036 		up_write_ref_node(&fte->node, false);
2037 	} else if (list_empty(&fte->node.children)) {
2038 		del_hw_fte(&fte->node);
2039 		/* Avoid double call to del_hw_fte */
2040 		fte->node.del_hw_func = NULL;
2041 		up_write_ref_node(&fte->node, false);
2042 		tree_put_node(&fte->node, false);
2043 	}
2044 	kfree(handle);
2045 }
2046 EXPORT_SYMBOL(mlx5_del_flow_rules);
2047 
2048 /* Assuming prio->node.children (flow tables) is sorted by level */
2049 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2050 {
2051 	struct fs_prio *prio;
2052 
2053 	fs_get_obj(prio, ft->node.parent);
2054 
2055 	if (!list_is_last(&ft->node.list, &prio->node.children))
2056 		return list_next_entry(ft, node.list);
2057 	return find_next_chained_ft(prio);
2058 }
2059 
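/* If the flow table being destroyed is the current root of its namespace,
 * promote the next flow table in the chain to root and tell firmware:
 * once with QPN 0 when no underlay QPNs are registered, otherwise once
 * per registered underlay QPN.
 */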
2060 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2061 {
2062 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2063 	struct mlx5_ft_underlay_qp *uqp;
2064 	struct mlx5_flow_table *new_root_ft = NULL;
2065 	int err = 0;
2066 	u32 qpn;
2067 
2068 	if (root->root_ft != ft)
2069 		return 0;
2070 
2071 	new_root_ft = find_next_ft(ft);
2072 	if (!new_root_ft) {
2073 		root->root_ft = NULL;
2074 		return 0;
2075 	}
2076 
2077 	if (list_empty(&root->underlay_qpns)) {
2078 		/* Don't set any QPN (i.e. zero) when the QPN list is empty */
2079 		qpn = 0;
2080 		err = root->cmds->update_root_ft(root, new_root_ft,
2081 						 qpn, false);
2082 	} else {
2083 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2084 			qpn = uqp->qpn;
2085 			err = root->cmds->update_root_ft(root,
2086 							 new_root_ft, qpn,
2087 							 false);
2088 			if (err)
2089 				break;
2090 		}
2091 	}
2092 
2093 	if (err)
2094 		mlx5_core_warn(root->dev,
2095 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2096 			       ft->id, qpn);
2097 	else
2098 		root->root_ft = new_root_ft;
2099 
2100 	return 0;
2101 }
2102 
2103 /* On flow table removal, re-connect the previous priority's flow tables
2104  * and any forwarding rules pointing at this table to the next chained table.
2105  */
2106 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2107 {
2108 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2109 	struct mlx5_flow_table *next_ft;
2110 	struct fs_prio *prio;
2111 	int err = 0;
2112 
2113 	err = update_root_ft_destroy(ft);
2114 	if (err)
2115 		return err;
2116 
2117 	fs_get_obj(prio, ft->node.parent);
2118 	if (list_first_entry(&prio->node.children,
2119 			     struct mlx5_flow_table,
2120 			     node.list) != ft)
2121 		return 0;
2122 
2123 	next_ft = find_next_chained_ft(prio);
2124 	err = connect_fwd_rules(dev, next_ft, ft);
2125 	if (err)
2126 		return err;
2127 
2128 	err = connect_prev_fts(dev, next_ft, prio);
2129 	if (err)
2130 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2131 			       ft->id);
2132 	return err;
2133 }
2134 
2135 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2136 {
2137 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2138 	int err = 0;
2139 
2140 	mutex_lock(&root->chain_lock);
2141 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2142 		err = disconnect_flow_table(ft);
2143 	if (err) {
2144 		mutex_unlock(&root->chain_lock);
2145 		return err;
2146 	}
2147 	if (tree_remove_node(&ft->node, false))
2148 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2149 			       ft->id);
2150 	mutex_unlock(&root->chain_lock);
2151 
2152 	return err;
2153 }
2154 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2155 
2156 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2157 {
2158 	if (tree_remove_node(&fg->node, false))
2159 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2160 			       fg->id);
2161 }
2162 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2163 
2164 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2165 						int n)
2166 {
2167 	struct mlx5_flow_steering *steering = dev->priv.steering;
2168 
2169 	if (!steering || !steering->fdb_sub_ns)
2170 		return NULL;
2171 
2172 	return steering->fdb_sub_ns[n];
2173 }
2174 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2175 
2176 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2177 						    enum mlx5_flow_namespace_type type)
2178 {
2179 	struct mlx5_flow_steering *steering = dev->priv.steering;
2180 	struct mlx5_flow_root_namespace *root_ns;
2181 	int prio = 0;
2182 	struct fs_prio *fs_prio;
2183 	struct mlx5_flow_namespace *ns;
2184 
2185 	if (!steering)
2186 		return NULL;
2187 
2188 	switch (type) {
2189 	case MLX5_FLOW_NAMESPACE_FDB:
2190 		if (steering->fdb_root_ns)
2191 			return &steering->fdb_root_ns->ns;
2192 		return NULL;
2193 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2194 		if (steering->sniffer_rx_root_ns)
2195 			return &steering->sniffer_rx_root_ns->ns;
2196 		return NULL;
2197 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2198 		if (steering->sniffer_tx_root_ns)
2199 			return &steering->sniffer_tx_root_ns->ns;
2200 		return NULL;
2201 	default:
2202 		break;
2203 	}
2204 
2205 	if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
2206 	    type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
2207 		root_ns = steering->egress_root_ns;
2208 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2209 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2210 		root_ns = steering->rdma_rx_root_ns;
2211 		prio = RDMA_RX_BYPASS_PRIO;
2212 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2213 		root_ns = steering->rdma_rx_root_ns;
2214 		prio = RDMA_RX_KERNEL_PRIO;
2215 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2216 		root_ns = steering->rdma_tx_root_ns;
2217 	} else { /* Must be NIC RX */
2218 		root_ns = steering->root_ns;
2219 		prio = type;
2220 	}
2221 
2222 	if (!root_ns)
2223 		return NULL;
2224 
2225 	fs_prio = find_prio(&root_ns->ns, prio);
2226 	if (!fs_prio)
2227 		return NULL;
2228 
2229 	ns = list_first_entry(&fs_prio->node.children,
2230 			      typeof(*ns),
2231 			      node.list);
2232 
2233 	return ns;
2234 }
2235 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2236 
2237 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2238 							      enum mlx5_flow_namespace_type type,
2239 							      int vport)
2240 {
2241 	struct mlx5_flow_steering *steering = dev->priv.steering;
2242 
2243 	if (!steering)
2244 		return NULL;
2245 
2246 	switch (type) {
2247 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2248 		if (vport >= steering->esw_egress_acl_vports)
2249 			return NULL;
2250 		if (steering->esw_egress_root_ns &&
2251 		    steering->esw_egress_root_ns[vport])
2252 			return &steering->esw_egress_root_ns[vport]->ns;
2253 		else
2254 			return NULL;
2255 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2256 		if (vport >= steering->esw_ingress_acl_vports)
2257 			return NULL;
2258 		if (steering->esw_ingress_root_ns &&
2259 		    steering->esw_ingress_root_ns[vport])
2260 			return &steering->esw_ingress_root_ns[vport]->ns;
2261 		else
2262 			return NULL;
2263 	default:
2264 		return NULL;
2265 	}
2266 }
2267 
2268 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2269 				       unsigned int prio,
2270 				       int num_levels,
2271 				       enum fs_node_type type)
2272 {
2273 	struct fs_prio *fs_prio;
2274 
2275 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2276 	if (!fs_prio)
2277 		return ERR_PTR(-ENOMEM);
2278 
2279 	fs_prio->node.type = type;
2280 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2281 	tree_add_node(&fs_prio->node, &ns->node);
2282 	fs_prio->num_levels = num_levels;
2283 	fs_prio->prio = prio;
2284 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2285 
2286 	return fs_prio;
2287 }
2288 
2289 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2290 					      unsigned int prio,
2291 					      int num_levels)
2292 {
2293 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2294 }
2295 
2296 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2297 				      unsigned int prio, int num_levels)
2298 {
2299 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2300 }
2301 
2302 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2303 						     *ns)
2304 {
2305 	ns->node.type = FS_TYPE_NAMESPACE;
2306 
2307 	return ns;
2308 }
2309 
2310 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2311 						       int def_miss_act)
2312 {
2313 	struct mlx5_flow_namespace	*ns;
2314 
2315 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2316 	if (!ns)
2317 		return ERR_PTR(-ENOMEM);
2318 
2319 	fs_init_namespace(ns);
2320 	ns->def_miss_action = def_miss_act;
2321 	tree_init_node(&ns->node, NULL, del_sw_ns);
2322 	tree_add_node(&ns->node, &prio->node);
2323 	list_add_tail(&ns->node.list, &prio->node.children);
2324 
2325 	return ns;
2326 }
2327 
2328 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2329 			     struct init_tree_node *prio_metadata)
2330 {
2331 	struct fs_prio *fs_prio;
2332 	int i;
2333 
2334 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2335 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2336 		if (IS_ERR(fs_prio))
2337 			return PTR_ERR(fs_prio);
2338 	}
2339 	return 0;
2340 }
2341 
2342 #define FLOW_TABLE_BIT_SZ 1
2343 #define GET_FLOW_TABLE_CAP(dev, offset) \
2344 	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
2345 			offset / 32)) >>					\
2346 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2347 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2348 {
2349 	int i;
2350 
2351 	for (i = 0; i < caps->arr_sz; i++) {
2352 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2353 			return false;
2354 	}
2355 	return true;
2356 }
2357 
2358 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2359 				    struct init_tree_node *init_node,
2360 				    struct fs_node *fs_parent_node,
2361 				    struct init_tree_node *init_parent_node,
2362 				    int prio)
2363 {
2364 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2365 					      flow_table_properties_nic_receive.
2366 					      max_ft_level);
2367 	struct mlx5_flow_namespace *fs_ns;
2368 	struct fs_prio *fs_prio;
2369 	struct fs_node *base;
2370 	int i;
2371 	int err;
2372 
2373 	if (init_node->type == FS_TYPE_PRIO) {
2374 		if ((init_node->min_ft_level > max_ft_level) ||
2375 		    !has_required_caps(steering->dev, &init_node->caps))
2376 			return 0;
2377 
2378 		fs_get_obj(fs_ns, fs_parent_node);
2379 		if (init_node->num_leaf_prios)
2380 			return create_leaf_prios(fs_ns, prio, init_node);
2381 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2382 		if (IS_ERR(fs_prio))
2383 			return PTR_ERR(fs_prio);
2384 		base = &fs_prio->node;
2385 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2386 		fs_get_obj(fs_prio, fs_parent_node);
2387 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2388 		if (IS_ERR(fs_ns))
2389 			return PTR_ERR(fs_ns);
2390 		base = &fs_ns->node;
2391 	} else {
2392 		return -EINVAL;
2393 	}
2394 	prio = 0;
2395 	for (i = 0; i < init_node->ar_size; i++) {
2396 		err = init_root_tree_recursive(steering, &init_node->children[i],
2397 					       base, init_node, prio);
2398 		if (err)
2399 			return err;
2400 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2401 		    init_node->children[i].num_leaf_prios) {
2402 			prio += init_node->children[i].num_leaf_prios;
2403 		}
2404 	}
2405 
2406 	return 0;
2407 }
2408 
2409 static int init_root_tree(struct mlx5_flow_steering *steering,
2410 			  struct init_tree_node *init_node,
2411 			  struct fs_node *fs_parent_node)
2412 {
2413 	int err;
2414 	int i;
2415 
2416 	for (i = 0; i < init_node->ar_size; i++) {
2417 		err = init_root_tree_recursive(steering, &init_node->children[i],
2418 					       fs_parent_node,
2419 					       init_node, i);
2420 		if (err)
2421 			return err;
2422 	}
2423 	return 0;
2424 }
2425 
2426 static void del_sw_root_ns(struct fs_node *node)
2427 {
2428 	struct mlx5_flow_root_namespace *root_ns;
2429 	struct mlx5_flow_namespace *ns;
2430 
2431 	fs_get_obj(ns, node);
2432 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2433 	mutex_destroy(&root_ns->chain_lock);
2434 	kfree(node);
2435 }
2436 
2437 static struct mlx5_flow_root_namespace
2438 *create_root_ns(struct mlx5_flow_steering *steering,
2439 		enum fs_flow_table_type table_type)
2440 {
2441 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2442 	struct mlx5_flow_root_namespace *root_ns;
2443 	struct mlx5_flow_namespace *ns;
2444 
2445 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2446 	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2447 		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2448 
2449 	/* Create the root namespace */
2450 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2451 	if (!root_ns)
2452 		return NULL;
2453 
2454 	root_ns->dev = steering->dev;
2455 	root_ns->table_type = table_type;
2456 	root_ns->cmds = cmds;
2457 
2458 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2459 
2460 	ns = &root_ns->ns;
2461 	fs_init_namespace(ns);
2462 	mutex_init(&root_ns->chain_lock);
2463 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2464 	tree_add_node(&ns->node, NULL);
2465 
2466 	return root_ns;
2467 }
2468 
2469 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2470 
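/* set_prio_attrs_in_ns() and set_prio_attrs_in_prio() recurse into each
 * other to assign start_level and num_levels across the tree: sibling
 * namespaces of a regular prio share the same level range, while each
 * chain namespace of a FS_TYPE_PRIO_CHAINS prio gets its own range.
 */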
2471 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2472 {
2473 	struct fs_prio *prio;
2474 
2475 	fs_for_each_prio(prio, ns) {
2476 		 /* This updates prio start_level and num_levels */
2477 		set_prio_attrs_in_prio(prio, acc_level);
2478 		acc_level += prio->num_levels;
2479 	}
2480 	return acc_level;
2481 }
2482 
2483 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2484 {
2485 	struct mlx5_flow_namespace *ns;
2486 	int acc_level_ns = acc_level;
2487 
2488 	prio->start_level = acc_level;
2489 	fs_for_each_ns(ns, prio) {
2490 		/* This updates start_level and num_levels of ns's priority descendants */
2491 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2492 
2493 		/* If this is a prio with chains, we can jump from one chain
2494 		 * (namespace) to another, so the levels accumulate across chains.
2495 		 */
2496 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2497 			acc_level = acc_level_ns;
2498 	}
2499 
2500 	if (!prio->num_levels)
2501 		prio->num_levels = acc_level_ns - prio->start_level;
2502 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2503 }
2504 
2505 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2506 {
2507 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2508 	struct fs_prio *prio;
2509 	int start_level = 0;
2510 
2511 	fs_for_each_prio(prio, ns) {
2512 		set_prio_attrs_in_prio(prio, start_level);
2513 		start_level += prio->num_levels;
2514 	}
2515 }
2516 
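/* The anchor is a single-entry flow table created in the NIC RX anchor
 * namespace, the last priority in the tree, so that the other flow tables
 * always have a final table for their miss flow to point at.
 */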
2517 #define ANCHOR_PRIO 0
2518 #define ANCHOR_SIZE 1
2519 #define ANCHOR_LEVEL 0
2520 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2521 {
2522 	struct mlx5_flow_namespace *ns = NULL;
2523 	struct mlx5_flow_table_attr ft_attr = {};
2524 	struct mlx5_flow_table *ft;
2525 
2526 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2527 	if (WARN_ON(!ns))
2528 		return -EINVAL;
2529 
2530 	ft_attr.max_fte = ANCHOR_SIZE;
2531 	ft_attr.level   = ANCHOR_LEVEL;
2532 	ft_attr.prio    = ANCHOR_PRIO;
2533 
2534 	ft = mlx5_create_flow_table(ns, &ft_attr);
2535 	if (IS_ERR(ft)) {
2536 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2537 		return PTR_ERR(ft);
2538 	}
2539 	return 0;
2540 }
2541 
2542 static int init_root_ns(struct mlx5_flow_steering *steering)
2543 {
2544 	int err;
2545 
2546 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2547 	if (!steering->root_ns)
2548 		return -ENOMEM;
2549 
2550 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2551 	if (err)
2552 		goto out_err;
2553 
2554 	set_prio_attrs(steering->root_ns);
2555 	err = create_anchor_flow_table(steering);
2556 	if (err)
2557 		goto out_err;
2558 
2559 	return 0;
2560 
2561 out_err:
2562 	cleanup_root_ns(steering->root_ns);
2563 	steering->root_ns = NULL;
2564 	return err;
2565 }
2566 
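/* Depth-first teardown of a steering sub-tree: take a reference on the
 * node, recurse into its children, then drop the reference and remove the
 * node itself.
 */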
2567 static void clean_tree(struct fs_node *node)
2568 {
2569 	if (node) {
2570 		struct fs_node *iter;
2571 		struct fs_node *temp;
2572 
2573 		tree_get_node(node);
2574 		list_for_each_entry_safe(iter, temp, &node->children, list)
2575 			clean_tree(iter);
2576 		tree_put_node(node, false);
2577 		tree_remove_node(node, false);
2578 	}
2579 }
2580 
2581 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2582 {
2583 	if (!root_ns)
2584 		return;
2585 
2586 	clean_tree(&root_ns->ns.node);
2587 }
2588 
2589 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2590 {
2591 	struct mlx5_flow_steering *steering = dev->priv.steering;
2592 
2593 	cleanup_root_ns(steering->root_ns);
2594 	cleanup_root_ns(steering->fdb_root_ns);
2595 	steering->fdb_root_ns = NULL;
2596 	kfree(steering->fdb_sub_ns);
2597 	steering->fdb_sub_ns = NULL;
2598 	cleanup_root_ns(steering->sniffer_rx_root_ns);
2599 	cleanup_root_ns(steering->sniffer_tx_root_ns);
2600 	cleanup_root_ns(steering->rdma_rx_root_ns);
2601 	cleanup_root_ns(steering->rdma_tx_root_ns);
2602 	cleanup_root_ns(steering->egress_root_ns);
2603 	mlx5_cleanup_fc_stats(dev);
2604 	kmem_cache_destroy(steering->ftes_cache);
2605 	kmem_cache_destroy(steering->fgs_cache);
2606 	mlx5_ft_pool_destroy(dev);
2607 	kfree(steering);
2608 }
2609 
2610 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2611 {
2612 	struct fs_prio *prio;
2613 
2614 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2615 	if (!steering->sniffer_tx_root_ns)
2616 		return -ENOMEM;
2617 
2618 	/* Create single prio */
2619 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2620 	return PTR_ERR_OR_ZERO(prio);
2621 }
2622 
2623 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2624 {
2625 	struct fs_prio *prio;
2626 
2627 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2628 	if (!steering->sniffer_rx_root_ns)
2629 		return -ENOMEM;
2630 
2631 	/* Create single prio */
2632 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2633 	return PTR_ERR_OR_ZERO(prio);
2634 }
2635 
2636 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2637 {
2638 	int err;
2639 
2640 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2641 	if (!steering->rdma_rx_root_ns)
2642 		return -ENOMEM;
2643 
2644 	err = init_root_tree(steering, &rdma_rx_root_fs,
2645 			     &steering->rdma_rx_root_ns->ns.node);
2646 	if (err)
2647 		goto out_err;
2648 
2649 	set_prio_attrs(steering->rdma_rx_root_ns);
2650 
2651 	return 0;
2652 
2653 out_err:
2654 	cleanup_root_ns(steering->rdma_rx_root_ns);
2655 	steering->rdma_rx_root_ns = NULL;
2656 	return err;
2657 }
2658 
2659 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2660 {
2661 	int err;
2662 
2663 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2664 	if (!steering->rdma_tx_root_ns)
2665 		return -ENOMEM;
2666 
2667 	err = init_root_tree(steering, &rdma_tx_root_fs,
2668 			     &steering->rdma_tx_root_ns->ns.node);
2669 	if (err)
2670 		goto out_err;
2671 
2672 	set_prio_attrs(steering->rdma_tx_root_ns);
2673 
2674 	return 0;
2675 
2676 out_err:
2677 	cleanup_root_ns(steering->rdma_tx_root_ns);
2678 	steering->rdma_tx_root_ns = NULL;
2679 	return err;
2680 }
2681 
2682 /* FT and tc chains are stored in the same array so we can re-use the
2683  * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2684  * When creating a new ns for each chain store it in the first available slot.
2685  * Assume tc chains are created and stored first and only then the FT chain.
2686  */
2687 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2688 					struct mlx5_flow_namespace *ns)
2689 {
2690 	int chain = 0;
2691 
2692 	while (steering->fdb_sub_ns[chain])
2693 		++chain;
2694 
2695 	steering->fdb_sub_ns[chain] = ns;
2696 }
2697 
2698 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2699 					struct fs_prio *maj_prio)
2700 {
2701 	struct mlx5_flow_namespace *ns;
2702 	struct fs_prio *min_prio;
2703 	int prio;
2704 
2705 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2706 	if (IS_ERR(ns))
2707 		return PTR_ERR(ns);
2708 
2709 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2710 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2711 		if (IS_ERR(min_prio))
2712 			return PTR_ERR(min_prio);
2713 	}
2714 
2715 	store_fdb_sub_ns_prio_chain(steering, ns);
2716 
2717 	return 0;
2718 }
2719 
2720 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2721 			     int fs_prio,
2722 			     int chains)
2723 {
2724 	struct fs_prio *maj_prio;
2725 	int levels;
2726 	int chain;
2727 	int err;
2728 
2729 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2730 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2731 					  fs_prio,
2732 					  levels);
2733 	if (IS_ERR(maj_prio))
2734 		return PTR_ERR(maj_prio);
2735 
2736 	for (chain = 0; chain < chains; chain++) {
2737 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2738 		if (err)
2739 			return err;
2740 	}
2741 
2742 	return 0;
2743 }
2744 
2745 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2746 {
2747 	int err;
2748 
2749 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2750 				       sizeof(*steering->fdb_sub_ns),
2751 				       GFP_KERNEL);
2752 	if (!steering->fdb_sub_ns)
2753 		return -ENOMEM;
2754 
2755 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2756 	if (err)
2757 		return err;
2758 
2759 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2760 	if (err)
2761 		return err;
2762 
2763 	return 0;
2764 }
2765 
2766 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2767 {
2768 	struct fs_prio *maj_prio;
2769 	int err;
2770 
2771 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2772 	if (!steering->fdb_root_ns)
2773 		return -ENOMEM;
2774 
2775 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2776 				  1);
2777 	if (IS_ERR(maj_prio)) {
2778 		err = PTR_ERR(maj_prio);
2779 		goto out_err;
2780 	}
2781 	err = create_fdb_fast_path(steering);
2782 	if (err)
2783 		goto out_err;
2784 
2785 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2786 	if (IS_ERR(maj_prio)) {
2787 		err = PTR_ERR(maj_prio);
2788 		goto out_err;
2789 	}
2790 
2791 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2792 	if (IS_ERR(maj_prio)) {
2793 		err = PTR_ERR(maj_prio);
2794 		goto out_err;
2795 	}
2796 
2797 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2798 	if (IS_ERR(maj_prio)) {
2799 		err = PTR_ERR(maj_prio);
2800 		goto out_err;
2801 	}
2802 
2803 	/* We put this priority last, knowing that nothing will get here
2804 	 * unless explicitly forwarded to. This is possible because the
2805 	 * slow path tables have catch-all rules and nothing gets past
2806 	 * those tables.
2807 	 */
2808 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2809 	if (IS_ERR(maj_prio)) {
2810 		err = PTR_ERR(maj_prio);
2811 		goto out_err;
2812 	}
2813 
2814 	set_prio_attrs(steering->fdb_root_ns);
2815 	return 0;
2816 
2817 out_err:
2818 	cleanup_root_ns(steering->fdb_root_ns);
2819 	kfree(steering->fdb_sub_ns);
2820 	steering->fdb_sub_ns = NULL;
2821 	steering->fdb_root_ns = NULL;
2822 	return err;
2823 }
2824 
2825 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2826 {
2827 	struct fs_prio *prio;
2828 
2829 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2830 	if (!steering->esw_egress_root_ns[vport])
2831 		return -ENOMEM;
2832 
2833 	/* create 1 prio */
2834 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2835 	return PTR_ERR_OR_ZERO(prio);
2836 }
2837 
2838 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2839 {
2840 	struct fs_prio *prio;
2841 
2842 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2843 	if (!steering->esw_ingress_root_ns[vport])
2844 		return -ENOMEM;
2845 
2846 	/* create 1 prio */
2847 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2848 	return PTR_ERR_OR_ZERO(prio);
2849 }
2850 
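/* Allocate one egress ACL root namespace (with a single prio) per vport;
 * mlx5_fs_egress_acls_cleanup() releases them again.
 */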
2851 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2852 {
2853 	struct mlx5_flow_steering *steering = dev->priv.steering;
2854 	int err;
2855 	int i;
2856 
2857 	steering->esw_egress_root_ns =
2858 			kcalloc(total_vports,
2859 				sizeof(*steering->esw_egress_root_ns),
2860 				GFP_KERNEL);
2861 	if (!steering->esw_egress_root_ns)
2862 		return -ENOMEM;
2863 
2864 	for (i = 0; i < total_vports; i++) {
2865 		err = init_egress_acl_root_ns(steering, i);
2866 		if (err)
2867 			goto cleanup_root_ns;
2868 	}
2869 	steering->esw_egress_acl_vports = total_vports;
2870 	return 0;
2871 
2872 cleanup_root_ns:
2873 	for (i--; i >= 0; i--)
2874 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
2875 	kfree(steering->esw_egress_root_ns);
2876 	steering->esw_egress_root_ns = NULL;
2877 	return err;
2878 }
2879 
2880 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
2881 {
2882 	struct mlx5_flow_steering *steering = dev->priv.steering;
2883 	int i;
2884 
2885 	if (!steering->esw_egress_root_ns)
2886 		return;
2887 
2888 	for (i = 0; i < steering->esw_egress_acl_vports; i++)
2889 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
2890 
2891 	kfree(steering->esw_egress_root_ns);
2892 	steering->esw_egress_root_ns = NULL;
2893 }
2894 
2895 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2896 {
2897 	struct mlx5_flow_steering *steering = dev->priv.steering;
2898 	int err;
2899 	int i;
2900 
2901 	steering->esw_ingress_root_ns =
2902 			kcalloc(total_vports,
2903 				sizeof(*steering->esw_ingress_root_ns),
2904 				GFP_KERNEL);
2905 	if (!steering->esw_ingress_root_ns)
2906 		return -ENOMEM;
2907 
2908 	for (i = 0; i < total_vports; i++) {
2909 		err = init_ingress_acl_root_ns(steering, i);
2910 		if (err)
2911 			goto cleanup_root_ns;
2912 	}
2913 	steering->esw_ingress_acl_vports = total_vports;
2914 	return 0;
2915 
2916 cleanup_root_ns:
2917 	for (i--; i >= 0; i--)
2918 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2919 	kfree(steering->esw_ingress_root_ns);
2920 	steering->esw_ingress_root_ns = NULL;
2921 	return err;
2922 }
2923 
2924 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
2925 {
2926 	struct mlx5_flow_steering *steering = dev->priv.steering;
2927 	int i;
2928 
2929 	if (!steering->esw_ingress_root_ns)
2930 		return;
2931 
2932 	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
2933 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2934 
2935 	kfree(steering->esw_ingress_root_ns);
2936 	steering->esw_ingress_root_ns = NULL;
2937 }
2938 
2939 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2940 {
2941 	int err;
2942 
2943 	steering->egress_root_ns = create_root_ns(steering,
2944 						  FS_FT_NIC_TX);
2945 	if (!steering->egress_root_ns)
2946 		return -ENOMEM;
2947 
2948 	err = init_root_tree(steering, &egress_root_fs,
2949 			     &steering->egress_root_ns->ns.node);
2950 	if (err)
2951 		goto cleanup;
2952 	set_prio_attrs(steering->egress_root_ns);
2953 	return 0;
2954 cleanup:
2955 	cleanup_root_ns(steering->egress_root_ns);
2956 	steering->egress_root_ns = NULL;
2957 	return err;
2958 }
2959 
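/* Top-level flow steering init: set up flow counters and the flow table
 * pool, allocate the FG/FTE caches, and create each root namespace that
 * the device capabilities report support for (NIC RX, FDB, sniffer RX/TX,
 * RDMA RX/TX and NIC TX egress).
 */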
2960 int mlx5_init_fs(struct mlx5_core_dev *dev)
2961 {
2962 	struct mlx5_flow_steering *steering;
2963 	int err = 0;
2964 
2965 	err = mlx5_init_fc_stats(dev);
2966 	if (err)
2967 		return err;
2968 
2969 	err = mlx5_ft_pool_init(dev);
2970 	if (err)
2971 		return err;
2972 
2973 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2974 	if (!steering) {
2975 		err = -ENOMEM;
2976 		goto err;
2977 	}
2978 
2979 	steering->dev = dev;
2980 	dev->priv.steering = steering;
2981 
2982 	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
2983 						sizeof(struct mlx5_flow_group), 0,
2984 						0, NULL);
2985 	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
2986 						 0, NULL);
2987 	if (!steering->ftes_cache || !steering->fgs_cache) {
2988 		err = -ENOMEM;
2989 		goto err;
2990 	}
2991 
2992 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
2993 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
2994 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
2995 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
2996 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
2997 		err = init_root_ns(steering);
2998 		if (err)
2999 			goto err;
3000 	}
3001 
3002 	if (MLX5_ESWITCH_MANAGER(dev)) {
3003 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3004 			err = init_fdb_root_ns(steering);
3005 			if (err)
3006 				goto err;
3007 		}
3008 	}
3009 
3010 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3011 		err = init_sniffer_rx_root_ns(steering);
3012 		if (err)
3013 			goto err;
3014 	}
3015 
3016 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3017 		err = init_sniffer_tx_root_ns(steering);
3018 		if (err)
3019 			goto err;
3020 	}
3021 
3022 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3023 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3024 		err = init_rdma_rx_root_ns(steering);
3025 		if (err)
3026 			goto err;
3027 	}
3028 
3029 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3030 		err = init_rdma_tx_root_ns(steering);
3031 		if (err)
3032 			goto err;
3033 	}
3034 
3035 	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
3036 	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3037 		err = init_egress_root_ns(steering);
3038 		if (err)
3039 			goto err;
3040 	}
3041 
3042 	return 0;
3043 err:
3044 	mlx5_cleanup_fs(dev);
3045 	return err;
3046 }
3047 
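/* Underlay QPNs (used for example by IPoIB) are tracked on the NIC RX
 * root namespace; each registered QPN is pushed to firmware for the
 * current root flow table and replayed whenever the root flow table
 * changes.
 */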
3048 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3049 {
3050 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3051 	struct mlx5_ft_underlay_qp *new_uqp;
3052 	int err = 0;
3053 
3054 	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3055 	if (!new_uqp)
3056 		return -ENOMEM;
3057 
3058 	mutex_lock(&root->chain_lock);
3059 
3060 	if (!root->root_ft) {
3061 		err = -EINVAL;
3062 		goto update_ft_fail;
3063 	}
3064 
3065 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3066 					 false);
3067 	if (err) {
3068 		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3069 			       underlay_qpn, err);
3070 		goto update_ft_fail;
3071 	}
3072 
3073 	new_uqp->qpn = underlay_qpn;
3074 	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3075 
3076 	mutex_unlock(&root->chain_lock);
3077 
3078 	return 0;
3079 
3080 update_ft_fail:
3081 	mutex_unlock(&root->chain_lock);
3082 	kfree(new_uqp);
3083 	return err;
3084 }
3085 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3086 
3087 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3088 {
3089 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3090 	struct mlx5_ft_underlay_qp *uqp;
3091 	bool found = false;
3092 	int err = 0;
3093 
3094 	mutex_lock(&root->chain_lock);
3095 	list_for_each_entry(uqp, &root->underlay_qpns, list) {
3096 		if (uqp->qpn == underlay_qpn) {
3097 			found = true;
3098 			break;
3099 		}
3100 	}
3101 
3102 	if (!found) {
3103 		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3104 			       underlay_qpn);
3105 		err = -EINVAL;
3106 		goto out;
3107 	}
3108 
3109 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3110 					 true);
3111 	if (err)
3112 		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3113 			       underlay_qpn, err);
3114 
3115 	list_del(&uqp->list);
3116 	mutex_unlock(&root->chain_lock);
3117 	kfree(uqp);
3118 
3119 	return 0;
3120 
3121 out:
3122 	mutex_unlock(&root->chain_lock);
3123 	return err;
3124 }
3125 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3126 
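/* Resolve a namespace type to its root namespace: ESW ACL types are
 * resolved through vport 0, everything else through
 * mlx5_get_flow_namespace().
 */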
3127 static struct mlx5_flow_root_namespace
3128 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3129 {
3130 	struct mlx5_flow_namespace *ns;
3131 
3132 	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3133 	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3134 		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3135 	else
3136 		ns = mlx5_get_flow_namespace(dev, ns_type);
3137 	if (!ns)
3138 		return NULL;
3139 
3140 	return find_root(&ns->node);
3141 }
3142 
3143 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3144 						 u8 ns_type, u8 num_actions,
3145 						 void *modify_actions)
3146 {
3147 	struct mlx5_flow_root_namespace *root;
3148 	struct mlx5_modify_hdr *modify_hdr;
3149 	int err;
3150 
3151 	root = get_root_namespace(dev, ns_type);
3152 	if (!root)
3153 		return ERR_PTR(-EOPNOTSUPP);
3154 
3155 	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3156 	if (!modify_hdr)
3157 		return ERR_PTR(-ENOMEM);
3158 
3159 	modify_hdr->ns_type = ns_type;
3160 	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3161 					      modify_actions, modify_hdr);
3162 	if (err) {
3163 		kfree(modify_hdr);
3164 		return ERR_PTR(err);
3165 	}
3166 
3167 	return modify_hdr;
3168 }
3169 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3170 
3171 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3172 				struct mlx5_modify_hdr *modify_hdr)
3173 {
3174 	struct mlx5_flow_root_namespace *root;
3175 
3176 	root = get_root_namespace(dev, modify_hdr->ns_type);
3177 	if (WARN_ON(!root))
3178 		return;
3179 	root->cmds->modify_header_dealloc(root, modify_hdr);
3180 	kfree(modify_hdr);
3181 }
3182 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3183 
3184 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3185 						     struct mlx5_pkt_reformat_params *params,
3186 						     enum mlx5_flow_namespace_type ns_type)
3187 {
3188 	struct mlx5_pkt_reformat *pkt_reformat;
3189 	struct mlx5_flow_root_namespace *root;
3190 	int err;
3191 
3192 	root = get_root_namespace(dev, ns_type);
3193 	if (!root)
3194 		return ERR_PTR(-EOPNOTSUPP);
3195 
3196 	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3197 	if (!pkt_reformat)
3198 		return ERR_PTR(-ENOMEM);
3199 
3200 	pkt_reformat->ns_type = ns_type;
3201 	pkt_reformat->reformat_type = params->type;
3202 	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3203 						pkt_reformat);
3204 	if (err) {
3205 		kfree(pkt_reformat);
3206 		return ERR_PTR(err);
3207 	}
3208 
3209 	return pkt_reformat;
3210 }
3211 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3212 
3213 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3214 				  struct mlx5_pkt_reformat *pkt_reformat)
3215 {
3216 	struct mlx5_flow_root_namespace *root;
3217 
3218 	root = get_root_namespace(dev, pkt_reformat->ns_type);
3219 	if (WARN_ON(!root))
3220 		return;
3221 	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3222 	kfree(pkt_reformat);
3223 }
3224 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3225 
3226 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3227 				 struct mlx5_flow_root_namespace *peer_ns)
3228 {
3229 	if (peer_ns && ns->mode != peer_ns->mode) {
3230 		mlx5_core_err(ns->dev,
3231 			      "Can't peer namespace of different steering mode\n");
3232 		return -EINVAL;
3233 	}
3234 
3235 	return ns->cmds->set_peer(ns, peer_ns);
3236 }
3237 
3238 /* This function should be called only at init stage of the namespace.
3239  * It is not safe to call this function while steering operations
3240  * are executed in the namespace.
3241  */
3242 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3243 				 enum mlx5_flow_steering_mode mode)
3244 {
3245 	struct mlx5_flow_root_namespace *root;
3246 	const struct mlx5_flow_cmds *cmds;
3247 	int err;
3248 
3249 	root = find_root(&ns->node);
3250 	/* Can't set cmds on a non-root namespace */
3251 	if (&root->ns != ns)
3252 		return -EINVAL;
3253 
3254 	if (root->table_type != FS_FT_FDB)
3255 		return -EOPNOTSUPP;
3256 
3257 	if (root->mode == mode)
3258 		return 0;
3259 
3260 	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3261 		cmds = mlx5_fs_cmd_get_dr_cmds();
3262 	else
3263 		cmds = mlx5_fs_cmd_get_fw_cmds();
3264 	if (!cmds)
3265 		return -EOPNOTSUPP;
3266 
3267 	err = cmds->create_ns(root);
3268 	if (err) {
3269 		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3270 			      err);
3271 		return err;
3272 	}
3273 
3274 	root->cmds->destroy_ns(root);
3275 	root->cmds = cmds;
3276 	root->mode = mode;
3277 
3278 	return 0;
3279 }
3280