1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37 
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
43 
44 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
45 					 sizeof(struct init_tree_node))
46 
47 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
48 		 ...) {.type = FS_TYPE_PRIO,\
49 	.min_ft_level = min_level_val,\
50 	.num_levels = num_levels_val,\
51 	.num_leaf_prios = num_prios_val,\
52 	.caps = caps_val,\
53 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
54 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
55 }
56 
57 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
58 	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
59 		 __VA_ARGS__)\
60 
61 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
62 	.def_miss_action = def_miss_act,\
63 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
64 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
65 }
66 
67 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
68 				   sizeof(long))
69 
70 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
71 
72 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
73 			       .caps = (long[]) {__VA_ARGS__} }
74 
75 #define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
76 					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
77 					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
78 					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
79 
80 #define FS_CHAINING_CAPS_EGRESS                                                \
81 	FS_REQUIRED_CAPS(                                                      \
82 		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
83 		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
84 		FS_CAP(flow_table_properties_nic_transmit                      \
85 			       .identified_miss_table_mode),                   \
86 		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
87 
88 #define FS_CHAINING_CAPS_RDMA_TX                                                \
89 	FS_REQUIRED_CAPS(                                                       \
90 		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
91 		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
92 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
93 			       .identified_miss_table_mode),                    \
94 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
95 			       .flow_table_modify))
96 
97 #define LEFTOVERS_NUM_LEVELS 1
98 #define LEFTOVERS_NUM_PRIOS 1
99 
100 #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
101 #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
102 
103 #define BY_PASS_PRIO_NUM_LEVELS 1
104 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
105 			   LEFTOVERS_NUM_PRIOS)
106 
107 #define KERNEL_RX_MACSEC_NUM_PRIOS  1
108 #define KERNEL_RX_MACSEC_NUM_LEVELS 2
109 #define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
110 
111 #define ETHTOOL_PRIO_NUM_LEVELS 1
112 #define ETHTOOL_NUM_PRIOS 11
113 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
114 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
115 #define KERNEL_NIC_PRIO_NUM_LEVELS 8
116 #define KERNEL_NIC_NUM_PRIOS 1
117 /* One more level for tc */
118 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
119 
120 #define KERNEL_NIC_TC_NUM_PRIOS  1
121 #define KERNEL_NIC_TC_NUM_LEVELS 3
122 
123 #define ANCHOR_NUM_LEVELS 1
124 #define ANCHOR_NUM_PRIOS 1
125 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
126 
127 #define OFFLOADS_MAX_FT 2
128 #define OFFLOADS_NUM_PRIOS 2
129 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
130 
131 #define LAG_PRIO_NUM_LEVELS 1
132 #define LAG_NUM_PRIOS 1
133 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
134 
135 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
136 #define KERNEL_TX_IPSEC_NUM_LEVELS 2
137 #define KERNEL_TX_IPSEC_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
138 
139 #define KERNEL_TX_MACSEC_NUM_PRIOS  1
140 #define KERNEL_TX_MACSEC_NUM_LEVELS 2
141 #define KERNEL_TX_MACSEC_MIN_LEVEL       (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
142 
143 struct node_caps {
144 	size_t	arr_sz;
145 	long	*caps;
146 };
147 
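/* Static description of the default NIC RX steering tree: each node is either
 * a priority (optionally gated on device capabilities) or a namespace, along
 * with the number of leaf priorities and levels it spans. The tree is walked
 * at steering init time to build the runtime namespaces and priorities.
 */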
148 static struct init_tree_node {
149 	enum fs_node_type	type;
150 	struct init_tree_node *children;
151 	int ar_size;
152 	struct node_caps caps;
153 	int min_ft_level;
154 	int num_leaf_prios;
155 	int prio;
156 	int num_levels;
157 	enum mlx5_flow_table_miss_action def_miss_action;
158 } root_fs = {
159 	.type = FS_TYPE_NAMESPACE,
160 	.ar_size = 8,
161 	  .children = (struct init_tree_node[]){
162 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
163 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
164 				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
165 						    BY_PASS_PRIO_NUM_LEVELS))),
166 		  ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
167 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
168 				  ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
169 						    KERNEL_RX_MACSEC_NUM_LEVELS))),
170 		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
171 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
172 				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
173 						    LAG_PRIO_NUM_LEVELS))),
174 		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
175 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
176 				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
177 						    OFFLOADS_MAX_FT))),
178 		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
179 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
180 				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
181 						    ETHTOOL_PRIO_NUM_LEVELS))),
182 		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
183 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
184 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
185 						    KERNEL_NIC_TC_NUM_LEVELS),
186 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
187 						    KERNEL_NIC_PRIO_NUM_LEVELS))),
188 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
189 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
190 				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
191 						    LEFTOVERS_NUM_LEVELS))),
192 		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
193 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
194 				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
195 						    ANCHOR_NUM_LEVELS))),
196 	}
197 };
198 
199 static struct init_tree_node egress_root_fs = {
200 	.type = FS_TYPE_NAMESPACE,
201 	.ar_size = 3,
202 	.children = (struct init_tree_node[]) {
203 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
204 			 FS_CHAINING_CAPS_EGRESS,
205 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
206 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
207 						  BY_PASS_PRIO_NUM_LEVELS))),
208 		ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
209 			 FS_CHAINING_CAPS_EGRESS,
210 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
211 				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
212 						  KERNEL_TX_IPSEC_NUM_LEVELS))),
213 		ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
214 			 FS_CHAINING_CAPS_EGRESS,
215 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
216 				ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
217 						  KERNEL_TX_MACSEC_NUM_LEVELS))),
218 	}
219 };
220 
221 enum {
222 	RDMA_RX_COUNTERS_PRIO,
223 	RDMA_RX_BYPASS_PRIO,
224 	RDMA_RX_KERNEL_PRIO,
225 };
226 
227 #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
228 #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
229 #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
230 
231 static struct init_tree_node rdma_rx_root_fs = {
232 	.type = FS_TYPE_NAMESPACE,
233 	.ar_size = 3,
234 	.children = (struct init_tree_node[]) {
235 		[RDMA_RX_COUNTERS_PRIO] =
236 		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
237 			 FS_CHAINING_CAPS,
238 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
239 				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
240 						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
241 		[RDMA_RX_BYPASS_PRIO] =
242 		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
243 			 FS_CHAINING_CAPS,
244 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
245 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
246 						  BY_PASS_PRIO_NUM_LEVELS))),
247 		[RDMA_RX_KERNEL_PRIO] =
248 		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
249 			 FS_CHAINING_CAPS,
250 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
251 				ADD_MULTIPLE_PRIO(1, 1))),
252 	}
253 };
254 
255 enum {
256 	RDMA_TX_COUNTERS_PRIO,
257 	RDMA_TX_BYPASS_PRIO,
258 };
259 
260 #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
261 #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
262 
263 static struct init_tree_node rdma_tx_root_fs = {
264 	.type = FS_TYPE_NAMESPACE,
265 	.ar_size = 2,
266 	.children = (struct init_tree_node[]) {
267 		[RDMA_TX_COUNTERS_PRIO] =
268 		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
269 			 FS_CHAINING_CAPS,
270 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
271 				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
272 						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
273 		[RDMA_TX_BYPASS_PRIO] =
274 		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
275 			 FS_CHAINING_CAPS_RDMA_TX,
276 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
277 				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
278 						  BY_PASS_PRIO_NUM_LEVELS))),
279 	}
280 };
281 
282 enum fs_i_lock_class {
283 	FS_LOCK_GRANDPARENT,
284 	FS_LOCK_PARENT,
285 	FS_LOCK_CHILD
286 };
287 
288 static const struct rhashtable_params rhash_fte = {
289 	.key_len = sizeof_field(struct fs_fte, val),
290 	.key_offset = offsetof(struct fs_fte, val),
291 	.head_offset = offsetof(struct fs_fte, hash),
292 	.automatic_shrinking = true,
293 	.min_size = 1,
294 };
295 
296 static const struct rhashtable_params rhash_fg = {
297 	.key_len = sizeof_field(struct mlx5_flow_group, mask),
298 	.key_offset = offsetof(struct mlx5_flow_group, mask),
299 	.head_offset = offsetof(struct mlx5_flow_group, hash),
300 	.automatic_shrinking = true,
301 	.min_size = 1,
302 
303 };
304 
305 static void del_hw_flow_table(struct fs_node *node);
306 static void del_hw_flow_group(struct fs_node *node);
307 static void del_hw_fte(struct fs_node *node);
308 static void del_sw_flow_table(struct fs_node *node);
309 static void del_sw_flow_group(struct fs_node *node);
310 static void del_sw_fte(struct fs_node *node);
311 static void del_sw_prio(struct fs_node *node);
312 static void del_sw_ns(struct fs_node *node);
313 /* Deleting a rule (destination) is a special case that
314  * requires locking the FTE for the entire deletion process.
315  */
316 static void del_sw_hw_rule(struct fs_node *node);
317 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
318 				struct mlx5_flow_destination *d2);
319 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
320 static struct mlx5_flow_rule *
321 find_flow_rule(struct fs_fte *fte,
322 	       struct mlx5_flow_destination *dest);
323 
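/* Initialize a steering tree node: take the initial reference, init its list
 * heads and lock, and record the HW/SW teardown callbacks used when the node
 * is finally released.
 */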
324 static void tree_init_node(struct fs_node *node,
325 			   void (*del_hw_func)(struct fs_node *),
326 			   void (*del_sw_func)(struct fs_node *))
327 {
328 	refcount_set(&node->refcount, 1);
329 	INIT_LIST_HEAD(&node->list);
330 	INIT_LIST_HEAD(&node->children);
331 	init_rwsem(&node->lock);
332 	node->del_hw_func = del_hw_func;
333 	node->del_sw_func = del_sw_func;
334 	node->active = false;
335 }
336 
337 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
338 {
339 	if (parent)
340 		refcount_inc(&parent->refcount);
341 	node->parent = parent;
342 
343 	/* Parent is the root */
344 	if (!parent)
345 		node->root = node;
346 	else
347 		node->root = parent->root;
348 }
349 
350 static int tree_get_node(struct fs_node *node)
351 {
352 	return refcount_inc_not_zero(&node->refcount);
353 }
354 
355 static void nested_down_read_ref_node(struct fs_node *node,
356 				      enum fs_i_lock_class class)
357 {
358 	if (node) {
359 		down_read_nested(&node->lock, class);
360 		refcount_inc(&node->refcount);
361 	}
362 }
363 
364 static void nested_down_write_ref_node(struct fs_node *node,
365 				       enum fs_i_lock_class class)
366 {
367 	if (node) {
368 		down_write_nested(&node->lock, class);
369 		refcount_inc(&node->refcount);
370 	}
371 }
372 
373 static void down_write_ref_node(struct fs_node *node, bool locked)
374 {
375 	if (node) {
376 		if (!locked)
377 			down_write(&node->lock);
378 		refcount_inc(&node->refcount);
379 	}
380 }
381 
382 static void up_read_ref_node(struct fs_node *node)
383 {
384 	refcount_dec(&node->refcount);
385 	up_read(&node->lock);
386 }
387 
388 static void up_write_ref_node(struct fs_node *node, bool locked)
389 {
390 	refcount_dec(&node->refcount);
391 	if (!locked)
392 		up_write(&node->lock);
393 }
394 
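/* Drop a reference on a node. On the last put, destroy the HW object via
 * del_hw_func, unlink the node from its parent under the parent's lock, free
 * it via del_sw_func, and then release the reference held on the parent.
 */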
395 static void tree_put_node(struct fs_node *node, bool locked)
396 {
397 	struct fs_node *parent_node = node->parent;
398 
399 	if (refcount_dec_and_test(&node->refcount)) {
400 		if (node->del_hw_func)
401 			node->del_hw_func(node);
402 		if (parent_node) {
403 			down_write_ref_node(parent_node, locked);
404 			list_del_init(&node->list);
405 		}
406 		node->del_sw_func(node);
407 		if (parent_node)
408 			up_write_ref_node(parent_node, locked);
409 		node = NULL;
410 	}
411 	if (!node && parent_node)
412 		tree_put_node(parent_node, locked);
413 }
414 
415 static int tree_remove_node(struct fs_node *node, bool locked)
416 {
417 	if (refcount_read(&node->refcount) > 1) {
418 		refcount_dec(&node->refcount);
419 		return -EEXIST;
420 	}
421 	tree_put_node(node, locked);
422 	return 0;
423 }
424 
425 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
426 				 unsigned int prio)
427 {
428 	struct fs_prio *iter_prio;
429 
430 	fs_for_each_prio(iter_prio, ns) {
431 		if (iter_prio->prio == prio)
432 			return iter_prio;
433 	}
434 
435 	return NULL;
436 }
437 
438 static bool is_fwd_next_action(u32 action)
439 {
440 	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
441 			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
442 }
443 
444 static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
445 {
446 	return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
447 		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
448 		type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
449 		type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
450 		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
451 		type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
452 		type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
453 }
454 
455 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
456 {
457 	int i;
458 
459 	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
460 		if (spec->match_value[i] & ~spec->match_criteria[i]) {
461 			pr_warn("mlx5_core: match_value has bits set outside match_criteria\n");
462 			return false;
463 		}
464 
465 	return true;
466 }
467 
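/* Return the root namespace a node belongs to, via the cached node->root
 * pointer; warns and returns NULL if the cached root is not a namespace node.
 */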
468 struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
469 {
470 	struct fs_node *root;
471 	struct mlx5_flow_namespace *ns;
472 
473 	root = node->root;
474 
475 	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
476 		pr_warn("mlx5: flow steering node is not in tree or is corrupted\n");
477 		return NULL;
478 	}
479 
480 	ns = container_of(root, struct mlx5_flow_namespace, node);
481 	return container_of(ns, struct mlx5_flow_root_namespace, ns);
482 }
483 
484 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
485 {
486 	struct mlx5_flow_root_namespace *root = find_root(node);
487 
488 	if (root)
489 		return root->dev->priv.steering;
490 	return NULL;
491 }
492 
493 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
494 {
495 	struct mlx5_flow_root_namespace *root = find_root(node);
496 
497 	if (root)
498 		return root->dev;
499 	return NULL;
500 }
501 
502 static void del_sw_ns(struct fs_node *node)
503 {
504 	kfree(node);
505 }
506 
507 static void del_sw_prio(struct fs_node *node)
508 {
509 	kfree(node);
510 }
511 
512 static void del_hw_flow_table(struct fs_node *node)
513 {
514 	struct mlx5_flow_root_namespace *root;
515 	struct mlx5_flow_table *ft;
516 	struct mlx5_core_dev *dev;
517 	int err;
518 
519 	fs_get_obj(ft, node);
520 	dev = get_dev(&ft->node);
521 	root = find_root(&ft->node);
522 	trace_mlx5_fs_del_ft(ft);
523 
524 	if (node->active) {
525 		err = root->cmds->destroy_flow_table(root, ft);
526 		if (err)
527 			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
528 	}
529 }
530 
531 static void del_sw_flow_table(struct fs_node *node)
532 {
533 	struct mlx5_flow_table *ft;
534 	struct fs_prio *prio;
535 
536 	fs_get_obj(ft, node);
537 
538 	rhltable_destroy(&ft->fgs_hash);
539 	if (ft->node.parent) {
540 		fs_get_obj(prio, ft->node.parent);
541 		prio->num_ft--;
542 	}
543 	kfree(ft);
544 }
545 
546 static void modify_fte(struct fs_fte *fte)
547 {
548 	struct mlx5_flow_root_namespace *root;
549 	struct mlx5_flow_table *ft;
550 	struct mlx5_flow_group *fg;
551 	struct mlx5_core_dev *dev;
552 	int err;
553 
554 	fs_get_obj(fg, fte->node.parent);
555 	fs_get_obj(ft, fg->node.parent);
556 	dev = get_dev(&fte->node);
557 
558 	root = find_root(&ft->node);
559 	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
560 	if (err)
561 		mlx5_core_warn(dev,
562 			       "%s can't del rule fg id=%d fte_index=%d\n",
563 			       __func__, fg->id, fte->index);
564 	fte->modify_mask = 0;
565 }
566 
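/* del_sw_hw_rule(): unlink a forward-to-next rule from its target table's
 * fwd_rules list, drop the destination from the FTE's accounting and
 * accumulate the modify mask that will later be pushed to HW.
 */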
567 static void del_sw_hw_rule(struct fs_node *node)
568 {
569 	struct mlx5_flow_rule *rule;
570 	struct fs_fte *fte;
571 
572 	fs_get_obj(rule, node);
573 	fs_get_obj(fte, rule->node.parent);
574 	trace_mlx5_fs_del_rule(rule);
575 	if (is_fwd_next_action(rule->sw_action)) {
576 		mutex_lock(&rule->dest_attr.ft->lock);
577 		list_del(&rule->next_ft);
578 		mutex_unlock(&rule->dest_attr.ft->lock);
579 	}
580 
581 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
582 		--fte->dests_size;
583 		fte->modify_mask |=
584 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
585 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
586 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
587 		goto out;
588 	}
589 
590 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
591 		--fte->dests_size;
592 		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
593 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
594 		goto out;
595 	}
596 
597 	if (is_fwd_dest_type(rule->dest_attr.type)) {
598 		--fte->dests_size;
599 		--fte->fwd_dests;
600 
601 		if (!fte->fwd_dests)
602 			fte->action.action &=
603 				~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
604 		fte->modify_mask |=
605 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
606 		goto out;
607 	}
608 out:
609 	kfree(rule);
610 }
611 
612 static void del_hw_fte(struct fs_node *node)
613 {
614 	struct mlx5_flow_root_namespace *root;
615 	struct mlx5_flow_table *ft;
616 	struct mlx5_flow_group *fg;
617 	struct mlx5_core_dev *dev;
618 	struct fs_fte *fte;
619 	int err;
620 
621 	fs_get_obj(fte, node);
622 	fs_get_obj(fg, fte->node.parent);
623 	fs_get_obj(ft, fg->node.parent);
624 
625 	trace_mlx5_fs_del_fte(fte);
626 	WARN_ON(fte->dests_size);
627 	dev = get_dev(&ft->node);
628 	root = find_root(&ft->node);
629 	if (node->active) {
630 		err = root->cmds->delete_fte(root, ft, fte);
631 		if (err)
632 			mlx5_core_warn(dev,
633 				       "flow steering can't delete fte in index %d of flow group id %d\n",
634 				       fte->index, fg->id);
635 		node->active = false;
636 	}
637 }
638 
639 static void del_sw_fte(struct fs_node *node)
640 {
641 	struct mlx5_flow_steering *steering = get_steering(node);
642 	struct mlx5_flow_group *fg;
643 	struct fs_fte *fte;
644 	int err;
645 
646 	fs_get_obj(fte, node);
647 	fs_get_obj(fg, fte->node.parent);
648 
649 	err = rhashtable_remove_fast(&fg->ftes_hash,
650 				     &fte->hash,
651 				     rhash_fte);
652 	WARN_ON(err);
653 	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
654 	kmem_cache_free(steering->ftes_cache, fte);
655 }
656 
657 static void del_hw_flow_group(struct fs_node *node)
658 {
659 	struct mlx5_flow_root_namespace *root;
660 	struct mlx5_flow_group *fg;
661 	struct mlx5_flow_table *ft;
662 	struct mlx5_core_dev *dev;
663 
664 	fs_get_obj(fg, node);
665 	fs_get_obj(ft, fg->node.parent);
666 	dev = get_dev(&ft->node);
667 	trace_mlx5_fs_del_fg(fg);
668 
669 	root = find_root(&ft->node);
670 	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
671 		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
672 			       fg->id, ft->id);
673 }
674 
675 static void del_sw_flow_group(struct fs_node *node)
676 {
677 	struct mlx5_flow_steering *steering = get_steering(node);
678 	struct mlx5_flow_group *fg;
679 	struct mlx5_flow_table *ft;
680 	int err;
681 
682 	fs_get_obj(fg, node);
683 	fs_get_obj(ft, fg->node.parent);
684 
685 	rhashtable_destroy(&fg->ftes_hash);
686 	ida_destroy(&fg->fte_allocator);
687 	if (ft->autogroup.active &&
688 	    fg->max_ftes == ft->autogroup.group_size &&
689 	    fg->start_index < ft->autogroup.max_fte)
690 		ft->autogroup.num_groups--;
691 	err = rhltable_remove(&ft->fgs_hash,
692 			      &fg->hash,
693 			      rhash_fg);
694 	WARN_ON(err);
695 	kmem_cache_free(steering->fgs_cache, fg);
696 }
697 
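/* Allocate an index for the FTE within its flow group's range, insert it into
 * the group's FTE hash table and add it as a child of the group node.
 */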
698 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
699 {
700 	int index;
701 	int ret;
702 
703 	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
704 	if (index < 0)
705 		return index;
706 
707 	fte->index = index + fg->start_index;
708 	ret = rhashtable_insert_fast(&fg->ftes_hash,
709 				     &fte->hash,
710 				     rhash_fte);
711 	if (ret)
712 		goto err_ida_remove;
713 
714 	tree_add_node(&fte->node, &fg->node);
715 	list_add_tail(&fte->node.list, &fg->node.children);
716 	return 0;
717 
718 err_ida_remove:
719 	ida_free(&fg->fte_allocator, index);
720 	return ret;
721 }
722 
723 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
724 				const struct mlx5_flow_spec *spec,
725 				struct mlx5_flow_act *flow_act)
726 {
727 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
728 	struct fs_fte *fte;
729 
730 	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
731 	if (!fte)
732 		return ERR_PTR(-ENOMEM);
733 
734 	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
735 	fte->node.type = FS_TYPE_FLOW_ENTRY;
736 	fte->action = *flow_act;
737 	fte->flow_context = spec->flow_context;
738 
739 	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
740 
741 	return fte;
742 }
743 
744 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
745 			       struct mlx5_flow_group *fg)
746 {
747 	rhashtable_destroy(&fg->ftes_hash);
748 	kmem_cache_free(steering->fgs_cache, fg);
749 }
750 
751 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
752 						u8 match_criteria_enable,
753 						const void *match_criteria,
754 						int start_index,
755 						int end_index)
756 {
757 	struct mlx5_flow_group *fg;
758 	int ret;
759 
760 	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
761 	if (!fg)
762 		return ERR_PTR(-ENOMEM);
763 
764 	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
765 	if (ret) {
766 		kmem_cache_free(steering->fgs_cache, fg);
767 		return ERR_PTR(ret);
768 	}
769 
770 	ida_init(&fg->fte_allocator);
771 	fg->mask.match_criteria_enable = match_criteria_enable;
772 	memcpy(&fg->mask.match_criteria, match_criteria,
773 	       sizeof(fg->mask.match_criteria));
774 	fg->node.type = FS_TYPE_FLOW_GROUP;
775 	fg->start_index = start_index;
776 	fg->max_ftes = end_index - start_index + 1;
777 
778 	return fg;
779 }
780 
781 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
782 						       u8 match_criteria_enable,
783 						       const void *match_criteria,
784 						       int start_index,
785 						       int end_index,
786 						       struct list_head *prev)
787 {
788 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
789 	struct mlx5_flow_group *fg;
790 	int ret;
791 
792 	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
793 			      start_index, end_index);
794 	if (IS_ERR(fg))
795 		return fg;
796 
797 	/* initialize refcnt, add to parent list */
798 	ret = rhltable_insert(&ft->fgs_hash,
799 			      &fg->hash,
800 			      rhash_fg);
801 	if (ret) {
802 		dealloc_flow_group(steering, fg);
803 		return ERR_PTR(ret);
804 	}
805 
806 	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
807 	tree_add_node(&fg->node, &ft->node);
808 	/* Add node to group list */
809 	list_add(&fg->node.list, prev);
810 	atomic_inc(&ft->node.version);
811 
812 	return fg;
813 }
814 
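/* Allocate and initialize the SW object for a flow table; the HW table itself
 * is created separately through the root namespace commands.
 */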
815 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
816 						enum fs_flow_table_type table_type,
817 						enum fs_flow_table_op_mod op_mod,
818 						u32 flags)
819 {
820 	struct mlx5_flow_table *ft;
821 	int ret;
822 
823 	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
824 	if (!ft)
825 		return ERR_PTR(-ENOMEM);
826 
827 	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
828 	if (ret) {
829 		kfree(ft);
830 		return ERR_PTR(ret);
831 	}
832 
833 	ft->level = level;
834 	ft->node.type = FS_TYPE_FLOW_TABLE;
835 	ft->op_mod = op_mod;
836 	ft->type = table_type;
837 	ft->vport = vport;
838 	ft->flags = flags;
839 	INIT_LIST_HEAD(&ft->fwd_rules);
840 	mutex_init(&ft->lock);
841 
842 	return ft;
843 }
844 
845 /* If reverse is false, search for the first flow table in the root sub-tree
846  * starting from start (closest from the right); otherwise search for the
847  * last flow table in the root sub-tree up to start (closest from the left).
848  */
849 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
850 							 struct list_head *start,
851 							 bool reverse)
852 {
853 #define list_advance_entry(pos, reverse)		\
854 	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
855 
856 #define list_for_each_advance_continue(pos, head, reverse)	\
857 	for (pos = list_advance_entry(pos, reverse);		\
858 	     &pos->list != (head);				\
859 	     pos = list_advance_entry(pos, reverse))
860 
861 	struct fs_node *iter = list_entry(start, struct fs_node, list);
862 	struct mlx5_flow_table *ft = NULL;
863 
864 	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
865 		return NULL;
866 
867 	list_for_each_advance_continue(iter, &root->children, reverse) {
868 		if (iter->type == FS_TYPE_FLOW_TABLE) {
869 			fs_get_obj(ft, iter);
870 			return ft;
871 		}
872 		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
873 		if (ft)
874 			return ft;
875 	}
876 
877 	return ft;
878 }
879 
880 /* If reverse is false, return the first flow table in the next priority of
881  * prio in the tree; otherwise return the last flow table in the previous
882  * priority of prio in the tree.
883  */
884 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
885 {
886 	struct mlx5_flow_table *ft = NULL;
887 	struct fs_node *curr_node;
888 	struct fs_node *parent;
889 
890 	parent = prio->node.parent;
891 	curr_node = &prio->node;
892 	while (!ft && parent) {
893 		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
894 		curr_node = parent;
895 		parent = curr_node->parent;
896 	}
897 	return ft;
898 }
899 
900 /* Assuming the whole tree is locked by the chain-lock mutex */
901 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
902 {
903 	return find_closest_ft(prio, false);
904 }
905 
906 /* Assuming the whole tree is locked by the chain-lock mutex */
907 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
908 {
909 	return find_closest_ft(prio, true);
910 }
911 
912 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
913 						struct mlx5_flow_act *flow_act)
914 {
915 	struct fs_prio *prio;
916 	bool next_ns;
917 
918 	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
919 	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
920 
921 	return find_next_chained_ft(prio);
922 }
923 
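/* Modify every flow table in prio so that its miss traffic chains to ft. */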
924 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
925 			       struct fs_prio *prio,
926 			       struct mlx5_flow_table *ft)
927 {
928 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
929 	struct mlx5_flow_table *iter;
930 	int err;
931 
932 	fs_for_each_ft(iter, prio) {
933 		err = root->cmds->modify_flow_table(root, iter, ft);
934 		if (err) {
935 			mlx5_core_err(dev,
936 				      "Failed to modify flow table id %d, type %d, err %d\n",
937 				      iter->id, iter->type, err);
938 			/* The driver is out of sync with the FW */
939 			return err;
940 		}
941 	}
942 	return 0;
943 }
944 
945 /* Connect flow tables from previous priority of prio to ft */
946 static int connect_prev_fts(struct mlx5_core_dev *dev,
947 			    struct mlx5_flow_table *ft,
948 			    struct fs_prio *prio)
949 {
950 	struct mlx5_flow_table *prev_ft;
951 
952 	prev_ft = find_prev_chained_ft(prio);
953 	if (prev_ft) {
954 		struct fs_prio *prev_prio;
955 
956 		fs_get_obj(prev_prio, prev_ft->node.parent);
957 		return connect_fts_in_prio(dev, prev_prio, ft);
958 	}
959 	return 0;
960 }
961 
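/* If ft sits at a lower level than the current root flow table, promote it to
 * root, updating the root for every registered underlay QPN (or QPN 0 when
 * the underlay list is empty).
 */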
962 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
963 				 *prio)
964 {
965 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
966 	struct mlx5_ft_underlay_qp *uqp;
967 	int min_level = INT_MAX;
968 	int err = 0;
969 	u32 qpn;
970 
971 	if (root->root_ft)
972 		min_level = root->root_ft->level;
973 
974 	if (ft->level >= min_level)
975 		return 0;
976 
977 	if (list_empty(&root->underlay_qpns)) {
978 		/* Don't set any QPN (zero) in case QPN list is empty */
979 		qpn = 0;
980 		err = root->cmds->update_root_ft(root, ft, qpn, false);
981 	} else {
982 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
983 			qpn = uqp->qpn;
984 			err = root->cmds->update_root_ft(root, ft,
985 							 qpn, false);
986 			if (err)
987 				break;
988 		}
989 	}
990 
991 	if (err)
992 		mlx5_core_warn(root->dev,
993 			       "Update root flow table of id(%u) qpn(%d) failed\n",
994 			       ft->id, qpn);
995 	else
996 		root->root_ft = ft;
997 
998 	return err;
999 }
1000 
1001 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
1002 					 struct mlx5_flow_destination *dest)
1003 {
1004 	struct mlx5_flow_root_namespace *root;
1005 	struct mlx5_flow_table *ft;
1006 	struct mlx5_flow_group *fg;
1007 	struct fs_fte *fte;
1008 	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1009 	int err = 0;
1010 
1011 	fs_get_obj(fte, rule->node.parent);
1012 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1013 		return -EINVAL;
1014 	down_write_ref_node(&fte->node, false);
1015 	fs_get_obj(fg, fte->node.parent);
1016 	fs_get_obj(ft, fg->node.parent);
1017 
1018 	memcpy(&rule->dest_attr, dest, sizeof(*dest));
1019 	root = find_root(&ft->node);
1020 	err = root->cmds->update_fte(root, ft, fg,
1021 				     modify_mask, fte);
1022 	up_write_ref_node(&fte->node, false);
1023 
1024 	return err;
1025 }
1026 
1027 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1028 				 struct mlx5_flow_destination *new_dest,
1029 				 struct mlx5_flow_destination *old_dest)
1030 {
1031 	int i;
1032 
1033 	if (!old_dest) {
1034 		if (handle->num_rules != 1)
1035 			return -EINVAL;
1036 		return _mlx5_modify_rule_destination(handle->rule[0],
1037 						     new_dest);
1038 	}
1039 
1040 	for (i = 0; i < handle->num_rules; i++) {
1041 		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
1042 			return _mlx5_modify_rule_destination(handle->rule[i],
1043 							     new_dest);
1044 	}
1045 
1046 	return -EINVAL;
1047 }
1048 
1049 /* Modify/set FWD rules that point at old_next_ft to point at new_next_ft */
1050 static int connect_fwd_rules(struct mlx5_core_dev *dev,
1051 			     struct mlx5_flow_table *new_next_ft,
1052 			     struct mlx5_flow_table *old_next_ft)
1053 {
1054 	struct mlx5_flow_destination dest = {};
1055 	struct mlx5_flow_rule *iter;
1056 	int err = 0;
1057 
1058 	/* new_next_ft and old_next_ft could be NULL only
1059 	 * when we create/destroy the anchor flow table.
1060 	 */
1061 	if (!new_next_ft || !old_next_ft)
1062 		return 0;
1063 
1064 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1065 	dest.ft = new_next_ft;
1066 
1067 	mutex_lock(&old_next_ft->lock);
1068 	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1069 	mutex_unlock(&old_next_ft->lock);
1070 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1071 		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1072 		    iter->ft->ns == new_next_ft->ns)
1073 			continue;
1074 
1075 		err = _mlx5_modify_rule_destination(iter, &dest);
1076 		if (err)
1077 			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1078 			       new_next_ft->id);
1079 	}
1080 	return 0;
1081 }
1082 
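/* Hook a newly created table into the steering chain: when it becomes the
 * first table of its priority, connect the previous priority's tables and the
 * pending forward rules to it, then update the root flow table if the device
 * supports modify_root.
 */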
1083 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1084 			      struct fs_prio *prio)
1085 {
1086 	struct mlx5_flow_table *next_ft, *first_ft;
1087 	int err = 0;
1088 
1089 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1090 
1091 	first_ft = list_first_entry_or_null(&prio->node.children,
1092 					    struct mlx5_flow_table, node.list);
1093 	if (!first_ft || first_ft->level > ft->level) {
1094 		err = connect_prev_fts(dev, ft, prio);
1095 		if (err)
1096 			return err;
1097 
1098 		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
1099 		err = connect_fwd_rules(dev, ft, next_ft);
1100 		if (err)
1101 			return err;
1102 	}
1103 
1104 	if (MLX5_CAP_FLOWTABLE(dev,
1105 			       flow_table_properties_nic_receive.modify_root))
1106 		err = update_root_ft_create(ft, prio);
1107 	return err;
1108 }
1109 
1110 static void list_add_flow_table(struct mlx5_flow_table *ft,
1111 				struct fs_prio *prio)
1112 {
1113 	struct list_head *prev = &prio->node.children;
1114 	struct mlx5_flow_table *iter;
1115 
1116 	fs_for_each_ft(iter, prio) {
1117 		if (iter->level > ft->level)
1118 			break;
1119 		prev = &iter->node.list;
1120 	}
1121 	list_add(&ft->node.list, prev);
1122 }
1123 
1124 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1125 							struct mlx5_flow_table_attr *ft_attr,
1126 							enum fs_flow_table_op_mod op_mod,
1127 							u16 vport)
1128 {
1129 	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1130 	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1131 	struct mlx5_flow_table *next_ft;
1132 	struct fs_prio *fs_prio = NULL;
1133 	struct mlx5_flow_table *ft;
1134 	int err;
1135 
1136 	if (!root) {
1137 		pr_err("mlx5: flow steering failed to find root of namespace\n");
1138 		return ERR_PTR(-ENODEV);
1139 	}
1140 
1141 	mutex_lock(&root->chain_lock);
1142 	fs_prio = find_prio(ns, ft_attr->prio);
1143 	if (!fs_prio) {
1144 		err = -EINVAL;
1145 		goto unlock_root;
1146 	}
1147 	if (!unmanaged) {
1148 		/* The level is related to the
1149 		 * priority level range.
1150 		 */
1151 		if (ft_attr->level >= fs_prio->num_levels) {
1152 			err = -ENOSPC;
1153 			goto unlock_root;
1154 		}
1155 
1156 		ft_attr->level += fs_prio->start_level;
1157 	}
1158 
1159 	/* The level is related to the
1160 	 * priority level range.
1161 	 */
1162 	ft = alloc_flow_table(ft_attr->level,
1163 			      vport,
1164 			      root->table_type,
1165 			      op_mod, ft_attr->flags);
1166 	if (IS_ERR(ft)) {
1167 		err = PTR_ERR(ft);
1168 		goto unlock_root;
1169 	}
1170 
1171 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1172 	next_ft = unmanaged ? ft_attr->next_ft :
1173 			      find_next_chained_ft(fs_prio);
1174 	ft->def_miss_action = ns->def_miss_action;
1175 	ft->ns = ns;
1176 	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
1177 	if (err)
1178 		goto free_ft;
1179 
1180 	if (!unmanaged) {
1181 		err = connect_flow_table(root->dev, ft, fs_prio);
1182 		if (err)
1183 			goto destroy_ft;
1184 	}
1185 
1186 	ft->node.active = true;
1187 	down_write_ref_node(&fs_prio->node, false);
1188 	if (!unmanaged) {
1189 		tree_add_node(&ft->node, &fs_prio->node);
1190 		list_add_flow_table(ft, fs_prio);
1191 	} else {
1192 		ft->node.root = fs_prio->node.root;
1193 	}
1194 	fs_prio->num_ft++;
1195 	up_write_ref_node(&fs_prio->node, false);
1196 	mutex_unlock(&root->chain_lock);
1197 	trace_mlx5_fs_add_ft(ft);
1198 	return ft;
1199 destroy_ft:
1200 	root->cmds->destroy_flow_table(root, ft);
1201 free_ft:
1202 	rhltable_destroy(&ft->fgs_hash);
1203 	kfree(ft);
1204 unlock_root:
1205 	mutex_unlock(&root->chain_lock);
1206 	return ERR_PTR(err);
1207 }
1208 
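/* Create a flow table in the default (non-vport) context. Illustrative usage,
 * with placeholder prio/level/size values chosen by the caller:
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_table *ft;
 *
 *	ft_attr.prio = prio;
 *	ft_attr.level = 0;
 *	ft_attr.max_fte = 16;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */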
1209 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1210 					       struct mlx5_flow_table_attr *ft_attr)
1211 {
1212 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1213 }
1214 EXPORT_SYMBOL(mlx5_create_flow_table);
1215 
1216 u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
1217 {
1218 	return ft->id;
1219 }
1220 EXPORT_SYMBOL(mlx5_flow_table_id);
1221 
1222 struct mlx5_flow_table *
1223 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1224 			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
1225 {
1226 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1227 }
1228 
1229 struct mlx5_flow_table*
1230 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1231 				 int prio, u32 level)
1232 {
1233 	struct mlx5_flow_table_attr ft_attr = {};
1234 
1235 	ft_attr.level = level;
1236 	ft_attr.prio  = prio;
1237 	ft_attr.max_fte = 1;
1238 
1239 	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1240 }
1241 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1242 
1243 #define MAX_FLOW_GROUP_SIZE BIT(24)
1244 struct mlx5_flow_table*
1245 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1246 				    struct mlx5_flow_table_attr *ft_attr)
1247 {
1248 	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1249 	int max_num_groups = ft_attr->autogroup.max_num_groups;
1250 	struct mlx5_flow_table *ft;
1251 	int autogroups_max_fte;
1252 
1253 	ft = mlx5_create_flow_table(ns, ft_attr);
1254 	if (IS_ERR(ft))
1255 		return ft;
1256 
1257 	autogroups_max_fte = ft->max_fte - num_reserved_entries;
1258 	if (max_num_groups > autogroups_max_fte)
1259 		goto err_validate;
1260 	if (num_reserved_entries > ft->max_fte)
1261 		goto err_validate;
1262 
1263 	/* Align the number of groups according to the largest group size */
1264 	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1265 		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1266 
1267 	ft->autogroup.active = true;
1268 	ft->autogroup.required_groups = max_num_groups;
1269 	ft->autogroup.max_fte = autogroups_max_fte;
1270 	/* Divide by one extra group so there is spare room beyond the requested groups */
1271 	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1272 
1273 	return ft;
1274 
1275 err_validate:
1276 	mlx5_destroy_flow_table(ft);
1277 	return ERR_PTR(-ENOSPC);
1278 }
1279 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1280 
1281 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1282 					       u32 *fg_in)
1283 {
1284 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1285 	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1286 					    fg_in, match_criteria);
1287 	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1288 					    fg_in,
1289 					    match_criteria_enable);
1290 	int start_index = MLX5_GET(create_flow_group_in, fg_in,
1291 				   start_flow_index);
1292 	int end_index = MLX5_GET(create_flow_group_in, fg_in,
1293 				 end_flow_index);
1294 	struct mlx5_flow_group *fg;
1295 	int err;
1296 
1297 	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1298 		return ERR_PTR(-EPERM);
1299 
1300 	down_write_ref_node(&ft->node, false);
1301 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1302 				     start_index, end_index,
1303 				     ft->node.children.prev);
1304 	up_write_ref_node(&ft->node, false);
1305 	if (IS_ERR(fg))
1306 		return fg;
1307 
1308 	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1309 	if (err) {
1310 		tree_put_node(&fg->node, false);
1311 		return ERR_PTR(err);
1312 	}
1313 	trace_mlx5_fs_add_fg(fg);
1314 	fg->node.active = true;
1315 
1316 	return fg;
1317 }
1318 EXPORT_SYMBOL(mlx5_create_flow_group);
1319 
1320 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1321 {
1322 	struct mlx5_flow_rule *rule;
1323 
1324 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1325 	if (!rule)
1326 		return NULL;
1327 
1328 	INIT_LIST_HEAD(&rule->next_ft);
1329 	rule->node.type = FS_TYPE_FLOW_DEST;
1330 	if (dest)
1331 		memcpy(&rule->dest_attr, dest, sizeof(*dest));
1332 	else
1333 		rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
1334 
1335 	return rule;
1336 }
1337 
1338 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1339 {
1340 	struct mlx5_flow_handle *handle;
1341 
1342 	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1343 	if (!handle)
1344 		return NULL;
1345 
1346 	handle->num_rules = num_rules;
1347 
1348 	return handle;
1349 }
1350 
1351 static void destroy_flow_handle(struct fs_fte *fte,
1352 				struct mlx5_flow_handle *handle,
1353 				struct mlx5_flow_destination *dest,
1354 				int i)
1355 {
1356 	for (; --i >= 0;) {
1357 		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1358 			fte->dests_size--;
1359 			list_del(&handle->rule[i]->node.list);
1360 			kfree(handle->rule[i]);
1361 		}
1362 	}
1363 	kfree(handle);
1364 }
1365 
1366 static struct mlx5_flow_handle *
1367 create_flow_handle(struct fs_fte *fte,
1368 		   struct mlx5_flow_destination *dest,
1369 		   int dest_num,
1370 		   int *modify_mask,
1371 		   bool *new_rule)
1372 {
1373 	struct mlx5_flow_handle *handle;
1374 	struct mlx5_flow_rule *rule = NULL;
1375 	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1376 	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1377 	int type;
1378 	int i = 0;
1379 
1380 	handle = alloc_handle((dest_num) ? dest_num : 1);
1381 	if (!handle)
1382 		return ERR_PTR(-ENOMEM);
1383 
1384 	do {
1385 		if (dest) {
1386 			rule = find_flow_rule(fte, dest + i);
1387 			if (rule) {
1388 				refcount_inc(&rule->node.refcount);
1389 				goto rule_found;
1390 			}
1391 		}
1392 
1393 		*new_rule = true;
1394 		rule = alloc_rule(dest + i);
1395 		if (!rule)
1396 			goto free_rules;
1397 
1398 		/* Add dest to the dests list - flow tables need to be at the
1399 		 * end of the list for forward-to-next-prio rules.
1400 		 */
1401 		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1402 		if (dest &&
1403 		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1404 			list_add(&rule->node.list, &fte->node.children);
1405 		else
1406 			list_add_tail(&rule->node.list, &fte->node.children);
1407 		if (dest) {
1408 			fte->dests_size++;
1409 
1410 			if (is_fwd_dest_type(dest[i].type))
1411 				fte->fwd_dests++;
1412 
1413 			type = dest[i].type ==
1414 				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1415 			*modify_mask |= type ? count : dst;
1416 		}
1417 rule_found:
1418 		handle->rule[i] = rule;
1419 	} while (++i < dest_num);
1420 
1421 	return handle;
1422 
1423 free_rules:
1424 	destroy_flow_handle(fte, handle, dest, i);
1425 	return ERR_PTR(-ENOMEM);
1426 }
1427 
1428 /* fte should not be deleted while calling this function */
1429 static struct mlx5_flow_handle *
1430 add_rule_fte(struct fs_fte *fte,
1431 	     struct mlx5_flow_group *fg,
1432 	     struct mlx5_flow_destination *dest,
1433 	     int dest_num,
1434 	     bool update_action)
1435 {
1436 	struct mlx5_flow_root_namespace *root;
1437 	struct mlx5_flow_handle *handle;
1438 	struct mlx5_flow_table *ft;
1439 	int modify_mask = 0;
1440 	int err;
1441 	bool new_rule = false;
1442 
1443 	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1444 				    &new_rule);
1445 	if (IS_ERR(handle) || !new_rule)
1446 		goto out;
1447 
1448 	if (update_action)
1449 		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1450 
1451 	fs_get_obj(ft, fg->node.parent);
1452 	root = find_root(&fg->node);
1453 	if (!(fte->status & FS_FTE_STATUS_EXISTING))
1454 		err = root->cmds->create_fte(root, ft, fg, fte);
1455 	else
1456 		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1457 	if (err)
1458 		goto free_handle;
1459 
1460 	fte->node.active = true;
1461 	fte->status |= FS_FTE_STATUS_EXISTING;
1462 	atomic_inc(&fg->node.version);
1463 
1464 out:
1465 	return handle;
1466 
1467 free_handle:
1468 	destroy_flow_handle(fte, handle, dest, handle->num_rules);
1469 	return ERR_PTR(err);
1470 }
1471 
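/* Pick an index range for a new autogroup: scan the existing groups (kept
 * sorted by start_index) for a gap large enough for the chosen group size,
 * then allocate and insert the group there.
 */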
1472 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
1473 						     const struct mlx5_flow_spec *spec)
1474 {
1475 	struct list_head *prev = &ft->node.children;
1476 	u32 max_fte = ft->autogroup.max_fte;
1477 	unsigned int candidate_index = 0;
1478 	unsigned int group_size = 0;
1479 	struct mlx5_flow_group *fg;
1480 
1481 	if (!ft->autogroup.active)
1482 		return ERR_PTR(-ENOENT);
1483 
1484 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1485 		group_size = ft->autogroup.group_size;
1486 
1487 	/* group_size == 0 means we cannot use the standard autogroup size; fall back to 1 */
1488 	if (group_size == 0)
1489 		group_size = 1;
1490 
1491 	/* sorted by start_index */
1492 	fs_for_each_fg(fg, ft) {
1493 		if (candidate_index + group_size > fg->start_index)
1494 			candidate_index = fg->start_index + fg->max_ftes;
1495 		else
1496 			break;
1497 		prev = &fg->node.list;
1498 	}
1499 
1500 	if (candidate_index + group_size > max_fte)
1501 		return ERR_PTR(-ENOSPC);
1502 
1503 	fg = alloc_insert_flow_group(ft,
1504 				     spec->match_criteria_enable,
1505 				     spec->match_criteria,
1506 				     candidate_index,
1507 				     candidate_index + group_size - 1,
1508 				     prev);
1509 	if (IS_ERR(fg))
1510 		goto out;
1511 
1512 	if (group_size == ft->autogroup.group_size)
1513 		ft->autogroup.num_groups++;
1514 
1515 out:
1516 	return fg;
1517 }
1518 
1519 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1520 				  struct mlx5_flow_group *fg)
1521 {
1522 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1523 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1524 	void *match_criteria_addr;
1525 	u8 src_esw_owner_mask_on;
1526 	void *misc;
1527 	int err;
1528 	u32 *in;
1529 
1530 	in = kvzalloc(inlen, GFP_KERNEL);
1531 	if (!in)
1532 		return -ENOMEM;
1533 
1534 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1535 		 fg->mask.match_criteria_enable);
1536 	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1537 	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
1538 		 fg->max_ftes - 1);
1539 
1540 	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1541 			    misc_parameters);
1542 	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1543 					 source_eswitch_owner_vhca_id);
1544 	MLX5_SET(create_flow_group_in, in,
1545 		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1546 
1547 	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1548 					   in, match_criteria);
1549 	memcpy(match_criteria_addr, fg->mask.match_criteria,
1550 	       sizeof(fg->mask.match_criteria));
1551 
1552 	err = root->cmds->create_flow_group(root, ft, in, fg);
1553 	if (!err) {
1554 		fg->node.active = true;
1555 		trace_mlx5_fs_add_fg(fg);
1556 	}
1557 
1558 	kvfree(in);
1559 	return err;
1560 }
1561 
1562 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1563 				struct mlx5_flow_destination *d2)
1564 {
1565 	if (d1->type == d2->type) {
1566 		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1567 		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1568 		     d1->vport.num == d2->vport.num &&
1569 		     d1->vport.flags == d2->vport.flags &&
1570 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1571 		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1572 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1573 		      (d1->vport.pkt_reformat->id ==
1574 		       d2->vport.pkt_reformat->id) : true)) ||
1575 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1576 		     d1->ft == d2->ft) ||
1577 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1578 		     d1->tir_num == d2->tir_num) ||
1579 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1580 		     d1->ft_num == d2->ft_num) ||
1581 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1582 		     d1->sampler_id == d2->sampler_id) ||
1583 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
1584 		     d1->range.field == d2->range.field &&
1585 		     d1->range.hit_ft == d2->range.hit_ft &&
1586 		     d1->range.miss_ft == d2->range.miss_ft &&
1587 		     d1->range.min == d2->range.min &&
1588 		     d1->range.max == d2->range.max))
1589 			return true;
1590 	}
1591 
1592 	return false;
1593 }
1594 
1595 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1596 					     struct mlx5_flow_destination *dest)
1597 {
1598 	struct mlx5_flow_rule *rule;
1599 
1600 	list_for_each_entry(rule, &fte->node.children, node.list) {
1601 		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1602 			return rule;
1603 	}
1604 	return NULL;
1605 }
1606 
1607 static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1608 					   const struct mlx5_fs_vlan *vlan1)
1609 {
1610 	return vlan0->ethtype != vlan1->ethtype ||
1611 	       vlan0->vid != vlan1->vid ||
1612 	       vlan0->prio != vlan1->prio;
1613 }
1614 
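/* Two rules with the same match cannot share one FTE if their actions diverge
 * in a way a single entry cannot express (e.g. drop vs. forward, different
 * reformat/modify-header objects or pushed VLANs); a count-only rule never
 * conflicts.
 */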
1615 static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1616 				      const struct mlx5_flow_act *act2)
1617 {
1618 	u32 action1 = act1->action;
1619 	u32 action2 = act2->action;
1620 	u32 xored_actions;
1621 
1622 	xored_actions = action1 ^ action2;
1623 
1624 	/* if one rule only wants to count, it's ok */
1625 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1626 	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1627 		return false;
1628 
1629 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
1630 			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1631 			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
1632 			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
1633 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1634 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1635 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1636 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1637 		return true;
1638 
1639 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1640 	    act1->pkt_reformat != act2->pkt_reformat)
1641 		return true;
1642 
1643 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1644 	    act1->modify_hdr != act2->modify_hdr)
1645 		return true;
1646 
1647 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1648 	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1649 		return true;
1650 
1651 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1652 	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1653 		return true;
1654 
1655 	return false;
1656 }
1657 
1658 static int check_conflicting_ftes(struct fs_fte *fte,
1659 				  const struct mlx5_flow_context *flow_context,
1660 				  const struct mlx5_flow_act *flow_act)
1661 {
1662 	if (check_conflicting_actions(flow_act, &fte->action)) {
1663 		mlx5_core_warn(get_dev(&fte->node),
1664 			       "Found two FTEs with conflicting actions\n");
1665 		return -EEXIST;
1666 	}
1667 
1668 	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1669 	    fte->flow_context.flow_tag != flow_context->flow_tag) {
1670 		mlx5_core_warn(get_dev(&fte->node),
1671 			       "FTE flow tag %u already exists with different flow tag %u\n",
1672 			       fte->flow_context.flow_tag,
1673 			       flow_context->flow_tag);
1674 		return -EEXIST;
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1681 					    const struct mlx5_flow_spec *spec,
1682 					    struct mlx5_flow_act *flow_act,
1683 					    struct mlx5_flow_destination *dest,
1684 					    int dest_num,
1685 					    struct fs_fte *fte)
1686 {
1687 	struct mlx5_flow_handle *handle;
1688 	int old_action;
1689 	int i;
1690 	int ret;
1691 
1692 	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1693 	if (ret)
1694 		return ERR_PTR(ret);
1695 
1696 	old_action = fte->action.action;
1697 	fte->action.action |= flow_act->action;
1698 	handle = add_rule_fte(fte, fg, dest, dest_num,
1699 			      old_action != flow_act->action);
1700 	if (IS_ERR(handle)) {
1701 		fte->action.action = old_action;
1702 		return handle;
1703 	}
1704 	trace_mlx5_fs_set_fte(fte, false);
1705 
1706 	for (i = 0; i < handle->num_rules; i++) {
1707 		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1708 			tree_add_node(&handle->rule[i]->node, &fte->node);
1709 			trace_mlx5_fs_add_rule(handle->rule[i]);
1710 		}
1711 	}
1712 	return handle;
1713 }
1714 
1715 static bool counter_is_valid(u32 action)
1716 {
1717 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1718 			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1719 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1720 }
1721 
1722 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1723 			  struct mlx5_flow_act *flow_act,
1724 			  struct mlx5_flow_table *ft)
1725 {
1726 	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1727 	u32 action = flow_act->action;
1728 
1729 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1730 		return counter_is_valid(action);
1731 
1732 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1733 		return true;
1734 
1735 	if (ignore_level) {
1736 		if (ft->type != FS_FT_FDB &&
1737 		    ft->type != FS_FT_NIC_RX)
1738 			return false;
1739 
1740 		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1741 		    ft->type != dest->ft->type)
1742 			return false;
1743 	}
1744 
1745 	if (!dest || ((dest->type ==
1746 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1747 	    (dest->ft->level <= ft->level && !ignore_level)))
1748 		return false;
1749 	return true;
1750 }
1751 
1752 struct match_list {
1753 	struct list_head	list;
1754 	struct mlx5_flow_group *g;
1755 };
1756 
1757 static void free_match_list(struct match_list *head, bool ft_locked)
1758 {
1759 	struct match_list *iter, *match_tmp;
1760 
1761 	list_for_each_entry_safe(iter, match_tmp, &head->list,
1762 				 list) {
1763 		tree_put_node(&iter->g->node, ft_locked);
1764 		list_del(&iter->list);
1765 		kfree(iter);
1766 	}
1767 }
1768 
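/* Under the RCU read lock, collect (and take a reference on) every flow group
 * in ft whose match criteria equal those of spec; if fg is non-NULL, restrict
 * the search to that group.
 */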
1769 static int build_match_list(struct match_list *match_head,
1770 			    struct mlx5_flow_table *ft,
1771 			    const struct mlx5_flow_spec *spec,
1772 			    struct mlx5_flow_group *fg,
1773 			    bool ft_locked)
1774 {
1775 	struct rhlist_head *tmp, *list;
1776 	struct mlx5_flow_group *g;
1777 	int err = 0;
1778 
1779 	rcu_read_lock();
1780 	INIT_LIST_HEAD(&match_head->list);
1781 	/* Collect all fgs which have a matching match_criteria */
1782 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1783 	/* We hold the RCU read lock (atomic context), so FW commands can't be executed here */
1784 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1785 		struct match_list *curr_match;
1786 
1787 		if (fg && fg != g)
1788 			continue;
1789 
1790 		if (unlikely(!tree_get_node(&g->node)))
1791 			continue;
1792 
1793 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1794 		if (!curr_match) {
1795 			rcu_read_unlock();
1796 			free_match_list(match_head, ft_locked);
1797 			return -ENOMEM;
1798 		}
1799 		curr_match->g = g;
1800 		list_add_tail(&curr_match->list, &match_head->list);
1801 	}
1802 	rcu_read_unlock();
1803 	return err;
1804 }
1805 
1806 static u64 matched_fgs_get_version(struct list_head *match_head)
1807 {
1808 	struct match_list *iter;
1809 	u64 version = 0;
1810 
1811 	list_for_each_entry(iter, match_head, list)
1812 		version += (u64)atomic_read(&iter->g->node.version);
1813 	return version;
1814 }
1815 
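/*
 * Look up an FTE by match value inside a single flow group. The group is
 * locked (read or write, per @take_write) only for the duration of the
 * lookup; on success the FTE is returned referenced and write-locked so the
 * caller can update its actions.
 */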
1816 static struct fs_fte *
1817 lookup_fte_locked(struct mlx5_flow_group *g,
1818 		  const u32 *match_value,
1819 		  bool take_write)
1820 {
1821 	struct fs_fte *fte_tmp;
1822 
1823 	if (take_write)
1824 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1825 	else
1826 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1827 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1828 					 rhash_fte);
1829 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1830 		fte_tmp = NULL;
1831 		goto out;
1832 	}
1833 	if (!fte_tmp->node.active) {
1834 		tree_put_node(&fte_tmp->node, false);
1835 		fte_tmp = NULL;
1836 		goto out;
1837 	}
1838 
1839 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1840 out:
1841 	if (take_write)
1842 		up_write_ref_node(&g->node, false);
1843 	else
1844 		up_read_ref_node(&g->node);
1845 	return fte_tmp;
1846 }
1847 
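/*
 * Try to satisfy the request using the already-matching flow groups in
 * @match_head: first look for an FTE with an identical match value and
 * merge the new rule into it (unless FLOW_ACT_NO_APPEND is set), otherwise
 * insert a freshly allocated FTE into the first group that has room.
 * Returns -EAGAIN or -ENOENT when the caller should retry with the table
 * write-locked or create a new autogroup.
 */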
1848 static struct mlx5_flow_handle *
1849 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1850 		       struct list_head *match_head,
1851 		       const struct mlx5_flow_spec *spec,
1852 		       struct mlx5_flow_act *flow_act,
1853 		       struct mlx5_flow_destination *dest,
1854 		       int dest_num,
1855 		       int ft_version)
1856 {
1857 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1858 	struct mlx5_flow_group *g;
1859 	struct mlx5_flow_handle *rule;
1860 	struct match_list *iter;
1861 	bool take_write = false;
1862 	struct fs_fte *fte;
1863 	u64  version = 0;
1864 	int err;
1865 
1866 	fte = alloc_fte(ft, spec, flow_act);
1867 	if (IS_ERR(fte))
1868 		return ERR_PTR(-ENOMEM);
1869 
1870 search_again_locked:
1871 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1872 		goto skip_search;
1873 	version = matched_fgs_get_version(match_head);
1874 	/* Try to find an FTE with an identical match value and attempt to
1875 	 * update its action.
1876 	 */
1877 	list_for_each_entry(iter, match_head, list) {
1878 		struct fs_fte *fte_tmp;
1879 
1880 		g = iter->g;
1881 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1882 		if (!fte_tmp)
1883 			continue;
1884 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1885 		/* No error check needed here, because insert_fte() is not called */
1886 		up_write_ref_node(&fte_tmp->node, false);
1887 		tree_put_node(&fte_tmp->node, false);
1888 		kmem_cache_free(steering->ftes_cache, fte);
1889 		return rule;
1890 	}
1891 
1892 skip_search:
1893 	/* No group with matching fte found, or we skipped the search.
1894 	 * Try to add a new fte to any matching fg.
1895 	 */
1896 
1897 	/* Check the ft version, in case a new flow group
1898 	 * was added while the fgs weren't locked
1899 	 */
1900 	if (atomic_read(&ft->node.version) != ft_version) {
1901 		rule = ERR_PTR(-EAGAIN);
1902 		goto out;
1903 	}
1904 
1905 	/* Check the fgs version. If the version has changed, it could be that
1906 	 * an FTE with the same match value was added while the fgs weren't
1907 	 * locked.
1908 	 */
1909 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1910 	    version != matched_fgs_get_version(match_head)) {
1911 		take_write = true;
1912 		goto search_again_locked;
1913 	}
1914 
1915 	list_for_each_entry(iter, match_head, list) {
1916 		g = iter->g;
1917 
1918 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1919 
1920 		if (!g->node.active) {
1921 			up_write_ref_node(&g->node, false);
1922 			continue;
1923 		}
1924 
1925 		err = insert_fte(g, fte);
1926 		if (err) {
1927 			up_write_ref_node(&g->node, false);
1928 			if (err == -ENOSPC)
1929 				continue;
1930 			kmem_cache_free(steering->ftes_cache, fte);
1931 			return ERR_PTR(err);
1932 		}
1933 
1934 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1935 		up_write_ref_node(&g->node, false);
1936 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1937 		up_write_ref_node(&fte->node, false);
1938 		if (IS_ERR(rule))
1939 			tree_put_node(&fte->node, false);
1940 		return rule;
1941 	}
1942 	rule = ERR_PTR(-ENOENT);
1943 out:
1944 	kmem_cache_free(steering->ftes_cache, fte);
1945 	return rule;
1946 }
1947 
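/*
 * Core add-rule path: validate the spec and destinations, try to reuse an
 * existing matching flow group/FTE under a read lock, and only fall back to
 * write-locking the table and creating an autogroup when that fails or the
 * table version changed underneath us.
 */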
1948 static struct mlx5_flow_handle *
1949 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1950 		     const struct mlx5_flow_spec *spec,
1951 		     struct mlx5_flow_act *flow_act,
1952 		     struct mlx5_flow_destination *dest,
1953 		     int dest_num)
1954 
1955 {
1956 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1957 	struct mlx5_flow_handle *rule;
1958 	struct match_list match_head;
1959 	struct mlx5_flow_group *g;
1960 	bool take_write = false;
1961 	struct fs_fte *fte;
1962 	int version;
1963 	int err;
1964 	int i;
1965 
1966 	if (!check_valid_spec(spec))
1967 		return ERR_PTR(-EINVAL);
1968 
1969 	if (flow_act->fg && ft->autogroup.active)
1970 		return ERR_PTR(-EINVAL);
1971 
1972 	if (dest && dest_num <= 0)
1973 		return ERR_PTR(-EINVAL);
1974 
1975 	for (i = 0; i < dest_num; i++) {
1976 		if (!dest_is_valid(&dest[i], flow_act, ft))
1977 			return ERR_PTR(-EINVAL);
1978 	}
1979 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1980 search_again_locked:
1981 	version = atomic_read(&ft->node.version);
1982 
1983 	/* Collect all fgs which have a matching match_criteria */
1984 	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
1985 	if (err) {
1986 		if (take_write)
1987 			up_write_ref_node(&ft->node, false);
1988 		else
1989 			up_read_ref_node(&ft->node);
1990 		return ERR_PTR(err);
1991 	}
1992 
1993 	if (!take_write)
1994 		up_read_ref_node(&ft->node);
1995 
1996 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1997 				      dest_num, version);
1998 	free_match_list(&match_head, take_write);
1999 	if (!IS_ERR(rule) ||
2000 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
2001 		if (take_write)
2002 			up_write_ref_node(&ft->node, false);
2003 		return rule;
2004 	}
2005 
2006 	if (!take_write) {
2007 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2008 		take_write = true;
2009 	}
2010 
2011 	if (PTR_ERR(rule) == -EAGAIN ||
2012 	    version != atomic_read(&ft->node.version))
2013 		goto search_again_locked;
2014 
2015 	g = alloc_auto_flow_group(ft, spec);
2016 	if (IS_ERR(g)) {
2017 		rule = ERR_CAST(g);
2018 		up_write_ref_node(&ft->node, false);
2019 		return rule;
2020 	}
2021 
2022 	fte = alloc_fte(ft, spec, flow_act);
2023 	if (IS_ERR(fte)) {
2024 		up_write_ref_node(&ft->node, false);
2025 		err = PTR_ERR(fte);
2026 		goto err_alloc_fte;
2027 	}
2028 
2029 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
2030 	up_write_ref_node(&ft->node, false);
2031 
2032 	err = create_auto_flow_group(ft, g);
2033 	if (err)
2034 		goto err_release_fg;
2035 
2036 	err = insert_fte(g, fte);
2037 	if (err)
2038 		goto err_release_fg;
2039 
2040 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2041 	up_write_ref_node(&g->node, false);
2042 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2043 	up_write_ref_node(&fte->node, false);
2044 	if (IS_ERR(rule))
2045 		tree_put_node(&fte->node, false);
2046 	tree_put_node(&g->node, false);
2047 	return rule;
2048 
2049 err_release_fg:
2050 	up_write_ref_node(&g->node, false);
2051 	kmem_cache_free(steering->ftes_cache, fte);
2052 err_alloc_fte:
2053 	tree_put_node(&g->node, false);
2054 	return ERR_PTR(err);
2055 }
2056 
2057 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2058 {
2059 	return ((ft->type == FS_FT_NIC_RX) &&
2060 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2061 }
2062 
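/*
 * Add a rule to @ft. When the action is FWD_NEXT_PRIO/FWD_NEXT_NS, the next
 * chained flow table is resolved under the root chain_lock and appended as
 * an extra FWD_DEST destination, and the rule is linked on that table's
 * fwd_rules list so it can be re-pointed if that table goes away.
 *
 * A minimal caller-side sketch (illustrative only; 'ft', 'spec' and
 * 'next_ft' are assumed to have been set up by the caller):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dst = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
 *		.ft = next_ft,
 *	};
 *	struct mlx5_flow_handle *handle;
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dst, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */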
2063 struct mlx5_flow_handle *
2064 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2065 		    const struct mlx5_flow_spec *spec,
2066 		    struct mlx5_flow_act *flow_act,
2067 		    struct mlx5_flow_destination *dest,
2068 		    int num_dest)
2069 {
2070 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2071 	static const struct mlx5_flow_spec zero_spec = {};
2072 	struct mlx5_flow_destination *gen_dest = NULL;
2073 	struct mlx5_flow_table *next_ft = NULL;
2074 	struct mlx5_flow_handle *handle = NULL;
2075 	u32 sw_action = flow_act->action;
2076 	int i;
2077 
2078 	if (!spec)
2079 		spec = &zero_spec;
2080 
2081 	if (!is_fwd_next_action(sw_action))
2082 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2083 
2084 	if (!fwd_next_prio_supported(ft))
2085 		return ERR_PTR(-EOPNOTSUPP);
2086 
2087 	mutex_lock(&root->chain_lock);
2088 	next_ft = find_next_fwd_ft(ft, flow_act);
2089 	if (!next_ft) {
2090 		handle = ERR_PTR(-EOPNOTSUPP);
2091 		goto unlock;
2092 	}
2093 
2094 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2095 			   GFP_KERNEL);
2096 	if (!gen_dest) {
2097 		handle = ERR_PTR(-ENOMEM);
2098 		goto unlock;
2099 	}
2100 	for (i = 0; i < num_dest; i++)
2101 		gen_dest[i] = dest[i];
2102 	gen_dest[i].type =
2103 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2104 	gen_dest[i].ft = next_ft;
2105 	dest = gen_dest;
2106 	num_dest++;
2107 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2108 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2109 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2110 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2111 	if (IS_ERR(handle))
2112 		goto unlock;
2113 
2114 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2115 		mutex_lock(&next_ft->lock);
2116 		list_add(&handle->rule[num_dest - 1]->next_ft,
2117 			 &next_ft->fwd_rules);
2118 		mutex_unlock(&next_ft->lock);
2119 		handle->rule[num_dest - 1]->sw_action = sw_action;
2120 		handle->rule[num_dest - 1]->ft = ft;
2121 	}
2122 unlock:
2123 	mutex_unlock(&root->chain_lock);
2124 	kfree(gen_dest);
2125 	return handle;
2126 }
2127 EXPORT_SYMBOL(mlx5_add_flow_rules);
2128 
2129 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2130 {
2131 	struct fs_fte *fte;
2132 	int i;
2133 
2134 	/* To consolidate the HW changes we lock the FTE against other
2135 	 * changes and take an extra reference, so that the FTE's "del"
2136 	 * functions are not invoked implicitly; they are handled here.
2137 	 * The removal of the rules is done under the locked FTE.
2138 	 * After removing all of the handle's rules, if there are remaining
2139 	 * rules, it means we only need to modify the FTE in FW, and then
2140 	 * unlock it and drop the reference we took above.
2141 	 * Otherwise, the FTE should be deleted. First delete the
2142 	 * FTE in FW. Then, unlock the FTE, and call tree_put_node() on
2143 	 * the FTE, which will handle the last decrease of the refcount, as
2144 	 * well as the required handling of its parent.
2145 	 */
2146 	fs_get_obj(fte, handle->rule[0]->node.parent);
2147 	down_write_ref_node(&fte->node, false);
2148 	for (i = handle->num_rules - 1; i >= 0; i--)
2149 		tree_remove_node(&handle->rule[i]->node, true);
2150 	if (list_empty(&fte->node.children)) {
2151 		fte->node.del_hw_func(&fte->node);
2152 		/* Avoid double call to del_hw_fte */
2153 		fte->node.del_hw_func = NULL;
2154 		up_write_ref_node(&fte->node, false);
2155 		tree_put_node(&fte->node, false);
2156 	} else if (fte->dests_size) {
2157 		if (fte->modify_mask)
2158 			modify_fte(fte);
2159 		up_write_ref_node(&fte->node, false);
2160 	} else {
2161 		up_write_ref_node(&fte->node, false);
2162 	}
2163 	kfree(handle);
2164 }
2165 EXPORT_SYMBOL(mlx5_del_flow_rules);
2166 
2167 /* Assuming prio->node.children (flow tables) is sorted by level */
2168 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2169 {
2170 	struct fs_prio *prio;
2171 
2172 	fs_get_obj(prio, ft->node.parent);
2173 
2174 	if (!list_is_last(&ft->node.list, &prio->node.children))
2175 		return list_next_entry(ft, node.list);
2176 	return find_next_chained_ft(prio);
2177 }
2178 
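/*
 * If the table being destroyed is the current root of its namespace,
 * promote the next chained flow table to root, re-issuing the update for
 * every registered underlay QPN (or once with QPN 0 when the list is
 * empty). A failure is only reported via a warning; the table is destroyed
 * regardless.
 */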
2179 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2180 {
2181 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2182 	struct mlx5_ft_underlay_qp *uqp;
2183 	struct mlx5_flow_table *new_root_ft = NULL;
2184 	int err = 0;
2185 	u32 qpn;
2186 
2187 	if (root->root_ft != ft)
2188 		return 0;
2189 
2190 	new_root_ft = find_next_ft(ft);
2191 	if (!new_root_ft) {
2192 		root->root_ft = NULL;
2193 		return 0;
2194 	}
2195 
2196 	if (list_empty(&root->underlay_qpns)) {
2197 		/* Don't set any QPN (zero) in case the QPN list is empty */
2198 		qpn = 0;
2199 		err = root->cmds->update_root_ft(root, new_root_ft,
2200 						 qpn, false);
2201 	} else {
2202 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2203 			qpn = uqp->qpn;
2204 			err = root->cmds->update_root_ft(root,
2205 							 new_root_ft, qpn,
2206 							 false);
2207 			if (err)
2208 				break;
2209 		}
2210 	}
2211 
2212 	if (err)
2213 		mlx5_core_warn(root->dev,
2214 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2215 			       ft->id, qpn);
2216 	else
2217 		root->root_ft = new_root_ft;
2218 
2219 	return 0;
2220 }
2221 
2222 /* Connect flow table from previous priority to
2223  * the next flow table.
2224  */
2225 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2226 {
2227 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2228 	struct mlx5_flow_table *next_ft;
2229 	struct fs_prio *prio;
2230 	int err = 0;
2231 
2232 	err = update_root_ft_destroy(ft);
2233 	if (err)
2234 		return err;
2235 
2236 	fs_get_obj(prio, ft->node.parent);
2237 	if (list_first_entry(&prio->node.children,
2238 			     struct mlx5_flow_table,
2239 			     node.list) != ft)
2240 		return 0;
2241 
2242 	next_ft = find_next_ft(ft);
2243 	err = connect_fwd_rules(dev, next_ft, ft);
2244 	if (err)
2245 		return err;
2246 
2247 	err = connect_prev_fts(dev, next_ft, prio);
2248 	if (err)
2249 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2250 			       ft->id);
2251 	return err;
2252 }
2253 
2254 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2255 {
2256 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2257 	int err = 0;
2258 
2259 	mutex_lock(&root->chain_lock);
2260 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2261 		err = disconnect_flow_table(ft);
2262 	if (err) {
2263 		mutex_unlock(&root->chain_lock);
2264 		return err;
2265 	}
2266 	if (tree_remove_node(&ft->node, false))
2267 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2268 			       ft->id);
2269 	mutex_unlock(&root->chain_lock);
2270 
2271 	return err;
2272 }
2273 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2274 
2275 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2276 {
2277 	if (tree_remove_node(&fg->node, false))
2278 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2279 			       fg->id);
2280 }
2281 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2282 
2283 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2284 						int n)
2285 {
2286 	struct mlx5_flow_steering *steering = dev->priv.steering;
2287 
2288 	if (!steering || !steering->fdb_sub_ns)
2289 		return NULL;
2290 
2291 	return steering->fdb_sub_ns[n];
2292 }
2293 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2294 
2295 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2296 {
2297 	switch (type) {
2298 	case MLX5_FLOW_NAMESPACE_BYPASS:
2299 	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
2300 	case MLX5_FLOW_NAMESPACE_LAG:
2301 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
2302 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
2303 	case MLX5_FLOW_NAMESPACE_KERNEL:
2304 	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2305 	case MLX5_FLOW_NAMESPACE_ANCHOR:
2306 		return true;
2307 	default:
2308 		return false;
2309 	}
2310 }
2311 
2312 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2313 						    enum mlx5_flow_namespace_type type)
2314 {
2315 	struct mlx5_flow_steering *steering = dev->priv.steering;
2316 	struct mlx5_flow_root_namespace *root_ns;
2317 	int prio = 0;
2318 	struct fs_prio *fs_prio;
2319 	struct mlx5_flow_namespace *ns;
2320 
2321 	if (!steering)
2322 		return NULL;
2323 
2324 	switch (type) {
2325 	case MLX5_FLOW_NAMESPACE_FDB:
2326 		if (steering->fdb_root_ns)
2327 			return &steering->fdb_root_ns->ns;
2328 		return NULL;
2329 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
2330 		if (steering->port_sel_root_ns)
2331 			return &steering->port_sel_root_ns->ns;
2332 		return NULL;
2333 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2334 		if (steering->sniffer_rx_root_ns)
2335 			return &steering->sniffer_rx_root_ns->ns;
2336 		return NULL;
2337 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2338 		if (steering->sniffer_tx_root_ns)
2339 			return &steering->sniffer_tx_root_ns->ns;
2340 		return NULL;
2341 	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2342 		root_ns = steering->fdb_root_ns;
2343 		prio =  FDB_BYPASS_PATH;
2344 		break;
2345 	case MLX5_FLOW_NAMESPACE_EGRESS:
2346 	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
2347 	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
2348 		root_ns = steering->egress_root_ns;
2349 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2350 		break;
2351 	case MLX5_FLOW_NAMESPACE_RDMA_RX:
2352 		root_ns = steering->rdma_rx_root_ns;
2353 		prio = RDMA_RX_BYPASS_PRIO;
2354 		break;
2355 	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2356 		root_ns = steering->rdma_rx_root_ns;
2357 		prio = RDMA_RX_KERNEL_PRIO;
2358 		break;
2359 	case MLX5_FLOW_NAMESPACE_RDMA_TX:
2360 		root_ns = steering->rdma_tx_root_ns;
2361 		break;
2362 	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2363 		root_ns = steering->rdma_rx_root_ns;
2364 		prio = RDMA_RX_COUNTERS_PRIO;
2365 		break;
2366 	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2367 		root_ns = steering->rdma_tx_root_ns;
2368 		prio = RDMA_TX_COUNTERS_PRIO;
2369 		break;
2370 	default: /* Must be NIC RX */
2371 		WARN_ON(!is_nic_rx_ns(type));
2372 		root_ns = steering->root_ns;
2373 		prio = type;
2374 		break;
2375 	}
2376 
2377 	if (!root_ns)
2378 		return NULL;
2379 
2380 	fs_prio = find_prio(&root_ns->ns, prio);
2381 	if (!fs_prio)
2382 		return NULL;
2383 
2384 	ns = list_first_entry(&fs_prio->node.children,
2385 			      typeof(*ns),
2386 			      node.list);
2387 
2388 	return ns;
2389 }
2390 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2391 
2392 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2393 							      enum mlx5_flow_namespace_type type,
2394 							      int vport)
2395 {
2396 	struct mlx5_flow_steering *steering = dev->priv.steering;
2397 
2398 	if (!steering)
2399 		return NULL;
2400 
2401 	switch (type) {
2402 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2403 		if (vport >= steering->esw_egress_acl_vports)
2404 			return NULL;
2405 		if (steering->esw_egress_root_ns &&
2406 		    steering->esw_egress_root_ns[vport])
2407 			return &steering->esw_egress_root_ns[vport]->ns;
2408 		else
2409 			return NULL;
2410 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2411 		if (vport >= steering->esw_ingress_acl_vports)
2412 			return NULL;
2413 		if (steering->esw_ingress_root_ns &&
2414 		    steering->esw_ingress_root_ns[vport])
2415 			return &steering->esw_ingress_root_ns[vport]->ns;
2416 		else
2417 			return NULL;
2418 	default:
2419 		return NULL;
2420 	}
2421 }
2422 
2423 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2424 				       unsigned int prio,
2425 				       int num_levels,
2426 				       enum fs_node_type type)
2427 {
2428 	struct fs_prio *fs_prio;
2429 
2430 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2431 	if (!fs_prio)
2432 		return ERR_PTR(-ENOMEM);
2433 
2434 	fs_prio->node.type = type;
2435 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2436 	tree_add_node(&fs_prio->node, &ns->node);
2437 	fs_prio->num_levels = num_levels;
2438 	fs_prio->prio = prio;
2439 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2440 
2441 	return fs_prio;
2442 }
2443 
2444 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2445 					      unsigned int prio,
2446 					      int num_levels)
2447 {
2448 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2449 }
2450 
2451 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2452 				      unsigned int prio, int num_levels)
2453 {
2454 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2455 }
2456 
2457 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2458 						     *ns)
2459 {
2460 	ns->node.type = FS_TYPE_NAMESPACE;
2461 
2462 	return ns;
2463 }
2464 
2465 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2466 						       int def_miss_act)
2467 {
2468 	struct mlx5_flow_namespace	*ns;
2469 
2470 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2471 	if (!ns)
2472 		return ERR_PTR(-ENOMEM);
2473 
2474 	fs_init_namespace(ns);
2475 	ns->def_miss_action = def_miss_act;
2476 	tree_init_node(&ns->node, NULL, del_sw_ns);
2477 	tree_add_node(&ns->node, &prio->node);
2478 	list_add_tail(&ns->node.list, &prio->node.children);
2479 
2480 	return ns;
2481 }
2482 
2483 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2484 			     struct init_tree_node *prio_metadata)
2485 {
2486 	struct fs_prio *fs_prio;
2487 	int i;
2488 
2489 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2490 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2491 		if (IS_ERR(fs_prio))
2492 			return PTR_ERR(fs_prio);
2493 	}
2494 	return 0;
2495 }
2496 
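/* Extract a single capability bit from the raw MLX5_CAP_FLOW_TABLE HCA
 * capability area, given the bit offset stored in node_caps.
 */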
2497 #define FLOW_TABLE_BIT_SZ 1
2498 #define GET_FLOW_TABLE_CAP(dev, offset) \
2499 	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
2500 			offset / 32)) >>					\
2501 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2502 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2503 {
2504 	int i;
2505 
2506 	for (i = 0; i < caps->arr_sz; i++) {
2507 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2508 			return false;
2509 	}
2510 	return true;
2511 }
2512 
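/*
 * Instantiate one node of the static init_tree_node template: a PRIO node
 * becomes one fs_prio (or several leaf prios) provided the device meets its
 * minimal ft level and capability requirements, a NAMESPACE node becomes a
 * flow namespace, and the children are then created recursively beneath it.
 */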
2513 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2514 				    struct init_tree_node *init_node,
2515 				    struct fs_node *fs_parent_node,
2516 				    struct init_tree_node *init_parent_node,
2517 				    int prio)
2518 {
2519 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2520 					      flow_table_properties_nic_receive.
2521 					      max_ft_level);
2522 	struct mlx5_flow_namespace *fs_ns;
2523 	struct fs_prio *fs_prio;
2524 	struct fs_node *base;
2525 	int i;
2526 	int err;
2527 
2528 	if (init_node->type == FS_TYPE_PRIO) {
2529 		if ((init_node->min_ft_level > max_ft_level) ||
2530 		    !has_required_caps(steering->dev, &init_node->caps))
2531 			return 0;
2532 
2533 		fs_get_obj(fs_ns, fs_parent_node);
2534 		if (init_node->num_leaf_prios)
2535 			return create_leaf_prios(fs_ns, prio, init_node);
2536 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2537 		if (IS_ERR(fs_prio))
2538 			return PTR_ERR(fs_prio);
2539 		base = &fs_prio->node;
2540 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2541 		fs_get_obj(fs_prio, fs_parent_node);
2542 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2543 		if (IS_ERR(fs_ns))
2544 			return PTR_ERR(fs_ns);
2545 		base = &fs_ns->node;
2546 	} else {
2547 		return -EINVAL;
2548 	}
2549 	prio = 0;
2550 	for (i = 0; i < init_node->ar_size; i++) {
2551 		err = init_root_tree_recursive(steering, &init_node->children[i],
2552 					       base, init_node, prio);
2553 		if (err)
2554 			return err;
2555 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2556 		    init_node->children[i].num_leaf_prios) {
2557 			prio += init_node->children[i].num_leaf_prios;
2558 		}
2559 	}
2560 
2561 	return 0;
2562 }
2563 
2564 static int init_root_tree(struct mlx5_flow_steering *steering,
2565 			  struct init_tree_node *init_node,
2566 			  struct fs_node *fs_parent_node)
2567 {
2568 	int err;
2569 	int i;
2570 
2571 	for (i = 0; i < init_node->ar_size; i++) {
2572 		err = init_root_tree_recursive(steering, &init_node->children[i],
2573 					       fs_parent_node,
2574 					       init_node, i);
2575 		if (err)
2576 			return err;
2577 	}
2578 	return 0;
2579 }
2580 
2581 static void del_sw_root_ns(struct fs_node *node)
2582 {
2583 	struct mlx5_flow_root_namespace *root_ns;
2584 	struct mlx5_flow_namespace *ns;
2585 
2586 	fs_get_obj(ns, node);
2587 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2588 	mutex_destroy(&root_ns->chain_lock);
2589 	kfree(node);
2590 }
2591 
2592 static struct mlx5_flow_root_namespace
2593 *create_root_ns(struct mlx5_flow_steering *steering,
2594 		enum fs_flow_table_type table_type)
2595 {
2596 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2597 	struct mlx5_flow_root_namespace *root_ns;
2598 	struct mlx5_flow_namespace *ns;
2599 
2600 	/* Create the root namespace */
2601 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2602 	if (!root_ns)
2603 		return NULL;
2604 
2605 	root_ns->dev = steering->dev;
2606 	root_ns->table_type = table_type;
2607 	root_ns->cmds = cmds;
2608 
2609 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2610 
2611 	ns = &root_ns->ns;
2612 	fs_init_namespace(ns);
2613 	mutex_init(&root_ns->chain_lock);
2614 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2615 	tree_add_node(&ns->node, NULL);
2616 
2617 	return root_ns;
2618 }
2619 
2620 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2621 
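/*
 * set_prio_attrs_in_ns()/set_prio_attrs_in_prio() recurse over the tree and
 * assign each priority its absolute start_level and num_levels. Namespaces
 * under a regular prio share the same level range, while under an
 * FS_TYPE_PRIO_CHAINS prio the chains' levels are accumulated so a rule may
 * jump from one chain (namespace) to the next.
 */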
2622 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2623 {
2624 	struct fs_prio *prio;
2625 
2626 	fs_for_each_prio(prio, ns) {
2627 		 /* This updates prio start_level and num_levels */
2628 		set_prio_attrs_in_prio(prio, acc_level);
2629 		acc_level += prio->num_levels;
2630 	}
2631 	return acc_level;
2632 }
2633 
2634 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2635 {
2636 	struct mlx5_flow_namespace *ns;
2637 	int acc_level_ns = acc_level;
2638 
2639 	prio->start_level = acc_level;
2640 	fs_for_each_ns(ns, prio) {
2641 		/* This updates start_level and num_levels of ns's priority descendants */
2642 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2643 
2644 		/* If this is a prio with chains, we can jump from one chain
2645 		 * (namespace) to another, so we accumulate the levels
2646 		 */
2647 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2648 			acc_level = acc_level_ns;
2649 	}
2650 
2651 	if (!prio->num_levels)
2652 		prio->num_levels = acc_level_ns - prio->start_level;
2653 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2654 }
2655 
2656 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2657 {
2658 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2659 	struct fs_prio *prio;
2660 	int start_level = 0;
2661 
2662 	fs_for_each_prio(prio, ns) {
2663 		set_prio_attrs_in_prio(prio, start_level);
2664 		start_level += prio->num_levels;
2665 	}
2666 }
2667 
2668 #define ANCHOR_PRIO 0
2669 #define ANCHOR_SIZE 1
2670 #define ANCHOR_LEVEL 0
2671 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2672 {
2673 	struct mlx5_flow_namespace *ns = NULL;
2674 	struct mlx5_flow_table_attr ft_attr = {};
2675 	struct mlx5_flow_table *ft;
2676 
2677 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2678 	if (WARN_ON(!ns))
2679 		return -EINVAL;
2680 
2681 	ft_attr.max_fte = ANCHOR_SIZE;
2682 	ft_attr.level   = ANCHOR_LEVEL;
2683 	ft_attr.prio    = ANCHOR_PRIO;
2684 
2685 	ft = mlx5_create_flow_table(ns, &ft_attr);
2686 	if (IS_ERR(ft)) {
2687 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2688 		return PTR_ERR(ft);
2689 	}
2690 	return 0;
2691 }
2692 
2693 static int init_root_ns(struct mlx5_flow_steering *steering)
2694 {
2695 	int err;
2696 
2697 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2698 	if (!steering->root_ns)
2699 		return -ENOMEM;
2700 
2701 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2702 	if (err)
2703 		goto out_err;
2704 
2705 	set_prio_attrs(steering->root_ns);
2706 	err = create_anchor_flow_table(steering);
2707 	if (err)
2708 		goto out_err;
2709 
2710 	return 0;
2711 
2712 out_err:
2713 	cleanup_root_ns(steering->root_ns);
2714 	steering->root_ns = NULL;
2715 	return err;
2716 }
2717 
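/*
 * Recursively tear down a steering sub-tree: take a temporary reference on
 * the node, clean all of its children first, then drop the reference and
 * remove the node itself.
 */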
2718 static void clean_tree(struct fs_node *node)
2719 {
2720 	if (node) {
2721 		struct fs_node *iter;
2722 		struct fs_node *temp;
2723 
2724 		tree_get_node(node);
2725 		list_for_each_entry_safe(iter, temp, &node->children, list)
2726 			clean_tree(iter);
2727 		tree_put_node(node, false);
2728 		tree_remove_node(node, false);
2729 	}
2730 }
2731 
2732 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2733 {
2734 	if (!root_ns)
2735 		return;
2736 
2737 	clean_tree(&root_ns->ns.node);
2738 }
2739 
2740 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2741 {
2742 	struct fs_prio *prio;
2743 
2744 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2745 	if (!steering->sniffer_tx_root_ns)
2746 		return -ENOMEM;
2747 
2748 	/* Create single prio */
2749 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2750 	return PTR_ERR_OR_ZERO(prio);
2751 }
2752 
2753 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2754 {
2755 	struct fs_prio *prio;
2756 
2757 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2758 	if (!steering->sniffer_rx_root_ns)
2759 		return -ENOMEM;
2760 
2761 	/* Create single prio */
2762 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2763 	return PTR_ERR_OR_ZERO(prio);
2764 }
2765 
2766 #define PORT_SEL_NUM_LEVELS 3
2767 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2768 {
2769 	struct fs_prio *prio;
2770 
2771 	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2772 	if (!steering->port_sel_root_ns)
2773 		return -ENOMEM;
2774 
2775 	/* Create single prio */
2776 	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2777 			      PORT_SEL_NUM_LEVELS);
2778 	return PTR_ERR_OR_ZERO(prio);
2779 }
2780 
2781 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2782 {
2783 	int err;
2784 
2785 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2786 	if (!steering->rdma_rx_root_ns)
2787 		return -ENOMEM;
2788 
2789 	err = init_root_tree(steering, &rdma_rx_root_fs,
2790 			     &steering->rdma_rx_root_ns->ns.node);
2791 	if (err)
2792 		goto out_err;
2793 
2794 	set_prio_attrs(steering->rdma_rx_root_ns);
2795 
2796 	return 0;
2797 
2798 out_err:
2799 	cleanup_root_ns(steering->rdma_rx_root_ns);
2800 	steering->rdma_rx_root_ns = NULL;
2801 	return err;
2802 }
2803 
2804 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2805 {
2806 	int err;
2807 
2808 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2809 	if (!steering->rdma_tx_root_ns)
2810 		return -ENOMEM;
2811 
2812 	err = init_root_tree(steering, &rdma_tx_root_fs,
2813 			     &steering->rdma_tx_root_ns->ns.node);
2814 	if (err)
2815 		goto out_err;
2816 
2817 	set_prio_attrs(steering->rdma_tx_root_ns);
2818 
2819 	return 0;
2820 
2821 out_err:
2822 	cleanup_root_ns(steering->rdma_tx_root_ns);
2823 	steering->rdma_tx_root_ns = NULL;
2824 	return err;
2825 }
2826 
2827 /* FT and tc chains are stored in the same array, so we can reuse the
2828  * mlx5_get_fdb_sub_ns() and tc API for FT chains.
2829  * When creating a new ns for each chain, store it in the first available slot.
2830  * Assume tc chains are created and stored first, and only then the FT chain.
2831  */
2832 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2833 					struct mlx5_flow_namespace *ns)
2834 {
2835 	int chain = 0;
2836 
2837 	while (steering->fdb_sub_ns[chain])
2838 		++chain;
2839 
2840 	steering->fdb_sub_ns[chain] = ns;
2841 }
2842 
2843 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2844 					struct fs_prio *maj_prio)
2845 {
2846 	struct mlx5_flow_namespace *ns;
2847 	struct fs_prio *min_prio;
2848 	int prio;
2849 
2850 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2851 	if (IS_ERR(ns))
2852 		return PTR_ERR(ns);
2853 
2854 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2855 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2856 		if (IS_ERR(min_prio))
2857 			return PTR_ERR(min_prio);
2858 	}
2859 
2860 	store_fdb_sub_ns_prio_chain(steering, ns);
2861 
2862 	return 0;
2863 }
2864 
2865 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2866 			     int fs_prio,
2867 			     int chains)
2868 {
2869 	struct fs_prio *maj_prio;
2870 	int levels;
2871 	int chain;
2872 	int err;
2873 
2874 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2875 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2876 					  fs_prio,
2877 					  levels);
2878 	if (IS_ERR(maj_prio))
2879 		return PTR_ERR(maj_prio);
2880 
2881 	for (chain = 0; chain < chains; chain++) {
2882 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2883 		if (err)
2884 			return err;
2885 	}
2886 
2887 	return 0;
2888 }
2889 
2890 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2891 {
2892 	int err;
2893 
2894 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2895 				       sizeof(*steering->fdb_sub_ns),
2896 				       GFP_KERNEL);
2897 	if (!steering->fdb_sub_ns)
2898 		return -ENOMEM;
2899 
2900 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2901 	if (err)
2902 		return err;
2903 
2904 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2905 	if (err)
2906 		return err;
2907 
2908 	return 0;
2909 }
2910 
2911 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2912 {
2913 	struct mlx5_flow_namespace *ns;
2914 	struct fs_prio *prio;
2915 	int i;
2916 
2917 	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2918 	if (IS_ERR(prio))
2919 		return PTR_ERR(prio);
2920 
2921 	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2922 	if (IS_ERR(ns))
2923 		return PTR_ERR(ns);
2924 
2925 	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2926 		prio = fs_create_prio(ns, i, 1);
2927 		if (IS_ERR(prio))
2928 			return PTR_ERR(prio);
2929 	}
2930 	return 0;
2931 }
2932 
2933 static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
2934 {
2935 	cleanup_root_ns(steering->fdb_root_ns);
2936 	steering->fdb_root_ns = NULL;
2937 	kfree(steering->fdb_sub_ns);
2938 	steering->fdb_sub_ns = NULL;
2939 }
2940 
2941 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2942 {
2943 	struct fs_prio *maj_prio;
2944 	int err;
2945 
2946 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2947 	if (!steering->fdb_root_ns)
2948 		return -ENOMEM;
2949 
2950 	err = create_fdb_bypass(steering);
2951 	if (err)
2952 		goto out_err;
2953 
2954 	err = create_fdb_fast_path(steering);
2955 	if (err)
2956 		goto out_err;
2957 
2958 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2959 	if (IS_ERR(maj_prio)) {
2960 		err = PTR_ERR(maj_prio);
2961 		goto out_err;
2962 	}
2963 
2964 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2965 	if (IS_ERR(maj_prio)) {
2966 		err = PTR_ERR(maj_prio);
2967 		goto out_err;
2968 	}
2969 
2970 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2971 	if (IS_ERR(maj_prio)) {
2972 		err = PTR_ERR(maj_prio);
2973 		goto out_err;
2974 	}
2975 
2976 	/* We put this priority last, knowing that nothing will get here
2977 	 * unless explicitly forwarded to. This is possible because the
2978 	 * slow path tables have catch-all rules and nothing gets past
2979 	 * those tables.
2980 	 */
2981 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2982 	if (IS_ERR(maj_prio)) {
2983 		err = PTR_ERR(maj_prio);
2984 		goto out_err;
2985 	}
2986 
2987 	set_prio_attrs(steering->fdb_root_ns);
2988 	return 0;
2989 
2990 out_err:
2991 	cleanup_fdb_root_ns(steering);
2992 	return err;
2993 }
2994 
2995 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2996 {
2997 	struct fs_prio *prio;
2998 
2999 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
3000 	if (!steering->esw_egress_root_ns[vport])
3001 		return -ENOMEM;
3002 
3003 	/* Create a single prio */
3004 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
3005 	return PTR_ERR_OR_ZERO(prio);
3006 }
3007 
3008 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
3009 {
3010 	struct fs_prio *prio;
3011 
3012 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
3013 	if (!steering->esw_ingress_root_ns[vport])
3014 		return -ENOMEM;
3015 
3016 	/* Create a single prio */
3017 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
3018 	return PTR_ERR_OR_ZERO(prio);
3019 }
3020 
3021 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3022 {
3023 	struct mlx5_flow_steering *steering = dev->priv.steering;
3024 	int err;
3025 	int i;
3026 
3027 	steering->esw_egress_root_ns =
3028 			kcalloc(total_vports,
3029 				sizeof(*steering->esw_egress_root_ns),
3030 				GFP_KERNEL);
3031 	if (!steering->esw_egress_root_ns)
3032 		return -ENOMEM;
3033 
3034 	for (i = 0; i < total_vports; i++) {
3035 		err = init_egress_acl_root_ns(steering, i);
3036 		if (err)
3037 			goto cleanup_root_ns;
3038 	}
3039 	steering->esw_egress_acl_vports = total_vports;
3040 	return 0;
3041 
3042 cleanup_root_ns:
3043 	for (i--; i >= 0; i--)
3044 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3045 	kfree(steering->esw_egress_root_ns);
3046 	steering->esw_egress_root_ns = NULL;
3047 	return err;
3048 }
3049 
3050 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3051 {
3052 	struct mlx5_flow_steering *steering = dev->priv.steering;
3053 	int i;
3054 
3055 	if (!steering->esw_egress_root_ns)
3056 		return;
3057 
3058 	for (i = 0; i < steering->esw_egress_acl_vports; i++)
3059 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3060 
3061 	kfree(steering->esw_egress_root_ns);
3062 	steering->esw_egress_root_ns = NULL;
3063 }
3064 
3065 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3066 {
3067 	struct mlx5_flow_steering *steering = dev->priv.steering;
3068 	int err;
3069 	int i;
3070 
3071 	steering->esw_ingress_root_ns =
3072 			kcalloc(total_vports,
3073 				sizeof(*steering->esw_ingress_root_ns),
3074 				GFP_KERNEL);
3075 	if (!steering->esw_ingress_root_ns)
3076 		return -ENOMEM;
3077 
3078 	for (i = 0; i < total_vports; i++) {
3079 		err = init_ingress_acl_root_ns(steering, i);
3080 		if (err)
3081 			goto cleanup_root_ns;
3082 	}
3083 	steering->esw_ingress_acl_vports = total_vports;
3084 	return 0;
3085 
3086 cleanup_root_ns:
3087 	for (i--; i >= 0; i--)
3088 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3089 	kfree(steering->esw_ingress_root_ns);
3090 	steering->esw_ingress_root_ns = NULL;
3091 	return err;
3092 }
3093 
3094 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3095 {
3096 	struct mlx5_flow_steering *steering = dev->priv.steering;
3097 	int i;
3098 
3099 	if (!steering->esw_ingress_root_ns)
3100 		return;
3101 
3102 	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3103 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3104 
3105 	kfree(steering->esw_ingress_root_ns);
3106 	steering->esw_ingress_root_ns = NULL;
3107 }
3108 
3109 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3110 {
3111 	struct mlx5_flow_root_namespace *root;
3112 	struct mlx5_flow_namespace *ns;
3113 
3114 	ns = mlx5_get_flow_namespace(dev, type);
3115 	if (!ns)
3116 		return 0;
3117 
3118 	root = find_root(&ns->node);
3119 	if (!root)
3120 		return 0;
3121 
3122 	return root->cmds->get_capabilities(root, root->table_type);
3123 }
3124 
3125 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3126 {
3127 	int err;
3128 
3129 	steering->egress_root_ns = create_root_ns(steering,
3130 						  FS_FT_NIC_TX);
3131 	if (!steering->egress_root_ns)
3132 		return -ENOMEM;
3133 
3134 	err = init_root_tree(steering, &egress_root_fs,
3135 			     &steering->egress_root_ns->ns.node);
3136 	if (err)
3137 		goto cleanup;
3138 	set_prio_attrs(steering->egress_root_ns);
3139 	return 0;
3140 cleanup:
3141 	cleanup_root_ns(steering->egress_root_ns);
3142 	steering->egress_root_ns = NULL;
3143 	return err;
3144 }
3145 
3146 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
3147 {
3148 	struct mlx5_flow_steering *steering = dev->priv.steering;
3149 
3150 	cleanup_root_ns(steering->root_ns);
3151 	cleanup_fdb_root_ns(steering);
3152 	cleanup_root_ns(steering->port_sel_root_ns);
3153 	cleanup_root_ns(steering->sniffer_rx_root_ns);
3154 	cleanup_root_ns(steering->sniffer_tx_root_ns);
3155 	cleanup_root_ns(steering->rdma_rx_root_ns);
3156 	cleanup_root_ns(steering->rdma_tx_root_ns);
3157 	cleanup_root_ns(steering->egress_root_ns);
3158 }
3159 
3160 int mlx5_fs_core_init(struct mlx5_core_dev *dev)
3161 {
3162 	struct mlx5_flow_steering *steering = dev->priv.steering;
3163 	int err = 0;
3164 
3165 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3166 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3167 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3168 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3169 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3170 		err = init_root_ns(steering);
3171 		if (err)
3172 			goto err;
3173 	}
3174 
3175 	if (MLX5_ESWITCH_MANAGER(dev)) {
3176 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3177 			err = init_fdb_root_ns(steering);
3178 			if (err)
3179 				goto err;
3180 		}
3181 	}
3182 
3183 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3184 		err = init_sniffer_rx_root_ns(steering);
3185 		if (err)
3186 			goto err;
3187 	}
3188 
3189 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3190 		err = init_sniffer_tx_root_ns(steering);
3191 		if (err)
3192 			goto err;
3193 	}
3194 
3195 	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3196 		err = init_port_sel_root_ns(steering);
3197 		if (err)
3198 			goto err;
3199 	}
3200 
3201 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3202 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3203 		err = init_rdma_rx_root_ns(steering);
3204 		if (err)
3205 			goto err;
3206 	}
3207 
3208 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3209 		err = init_rdma_tx_root_ns(steering);
3210 		if (err)
3211 			goto err;
3212 	}
3213 
3214 	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3215 		err = init_egress_root_ns(steering);
3216 		if (err)
3217 			goto err;
3218 	}
3219 
3220 	return 0;
3221 
3222 err:
3223 	mlx5_fs_core_cleanup(dev);
3224 	return err;
3225 }
3226 
3227 void mlx5_fs_core_free(struct mlx5_core_dev *dev)
3228 {
3229 	struct mlx5_flow_steering *steering = dev->priv.steering;
3230 
3231 	kmem_cache_destroy(steering->ftes_cache);
3232 	kmem_cache_destroy(steering->fgs_cache);
3233 	kfree(steering);
3234 	mlx5_ft_pool_destroy(dev);
3235 	mlx5_cleanup_fc_stats(dev);
3236 }
3237 
3238 int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
3239 {
3240 	struct mlx5_flow_steering *steering;
3241 	int err = 0;
3242 
3243 	err = mlx5_init_fc_stats(dev);
3244 	if (err)
3245 		return err;
3246 
3247 	err = mlx5_ft_pool_init(dev);
3248 	if (err)
3249 		goto err;
3250 
3251 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3252 	if (!steering) {
3253 		err = -ENOMEM;
3254 		goto err;
3255 	}
3256 
3257 	steering->dev = dev;
3258 	dev->priv.steering = steering;
3259 
3260 	if (mlx5_fs_dr_is_supported(dev))
3261 		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
3262 	else
3263 		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
3264 
3265 	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
3266 						sizeof(struct mlx5_flow_group), 0,
3267 						0, NULL);
3268 	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
3269 						 0, NULL);
3270 	if (!steering->ftes_cache || !steering->fgs_cache) {
3271 		err = -ENOMEM;
3272 		goto err;
3273 	}
3274 
3275 	return 0;
3276 
3277 err:
3278 	mlx5_fs_core_free(dev);
3279 	return err;
3280 }
3281 
3282 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3283 {
3284 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3285 	struct mlx5_ft_underlay_qp *new_uqp;
3286 	int err = 0;
3287 
3288 	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3289 	if (!new_uqp)
3290 		return -ENOMEM;
3291 
3292 	mutex_lock(&root->chain_lock);
3293 
3294 	if (!root->root_ft) {
3295 		err = -EINVAL;
3296 		goto update_ft_fail;
3297 	}
3298 
3299 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3300 					 false);
3301 	if (err) {
3302 		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3303 			       underlay_qpn, err);
3304 		goto update_ft_fail;
3305 	}
3306 
3307 	new_uqp->qpn = underlay_qpn;
3308 	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3309 
3310 	mutex_unlock(&root->chain_lock);
3311 
3312 	return 0;
3313 
3314 update_ft_fail:
3315 	mutex_unlock(&root->chain_lock);
3316 	kfree(new_uqp);
3317 	return err;
3318 }
3319 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3320 
3321 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3322 {
3323 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3324 	struct mlx5_ft_underlay_qp *uqp;
3325 	bool found = false;
3326 	int err = 0;
3327 
3328 	mutex_lock(&root->chain_lock);
3329 	list_for_each_entry(uqp, &root->underlay_qpns, list) {
3330 		if (uqp->qpn == underlay_qpn) {
3331 			found = true;
3332 			break;
3333 		}
3334 	}
3335 
3336 	if (!found) {
3337 		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3338 			       underlay_qpn);
3339 		err = -EINVAL;
3340 		goto out;
3341 	}
3342 
3343 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3344 					 true);
3345 	if (err)
3346 		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3347 			       underlay_qpn, err);
3348 
3349 	list_del(&uqp->list);
3350 	mutex_unlock(&root->chain_lock);
3351 	kfree(uqp);
3352 
3353 	return 0;
3354 
3355 out:
3356 	mutex_unlock(&root->chain_lock);
3357 	return err;
3358 }
3359 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3360 
3361 static struct mlx5_flow_root_namespace
3362 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3363 {
3364 	struct mlx5_flow_namespace *ns;
3365 
3366 	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3367 	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3368 		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3369 	else
3370 		ns = mlx5_get_flow_namespace(dev, ns_type);
3371 	if (!ns)
3372 		return NULL;
3373 
3374 	return find_root(&ns->node);
3375 }
3376 
3377 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3378 						 u8 ns_type, u8 num_actions,
3379 						 void *modify_actions)
3380 {
3381 	struct mlx5_flow_root_namespace *root;
3382 	struct mlx5_modify_hdr *modify_hdr;
3383 	int err;
3384 
3385 	root = get_root_namespace(dev, ns_type);
3386 	if (!root)
3387 		return ERR_PTR(-EOPNOTSUPP);
3388 
3389 	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3390 	if (!modify_hdr)
3391 		return ERR_PTR(-ENOMEM);
3392 
3393 	modify_hdr->ns_type = ns_type;
3394 	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3395 					      modify_actions, modify_hdr);
3396 	if (err) {
3397 		kfree(modify_hdr);
3398 		return ERR_PTR(err);
3399 	}
3400 
3401 	return modify_hdr;
3402 }
3403 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3404 
3405 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3406 				struct mlx5_modify_hdr *modify_hdr)
3407 {
3408 	struct mlx5_flow_root_namespace *root;
3409 
3410 	root = get_root_namespace(dev, modify_hdr->ns_type);
3411 	if (WARN_ON(!root))
3412 		return;
3413 	root->cmds->modify_header_dealloc(root, modify_hdr);
3414 	kfree(modify_hdr);
3415 }
3416 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3417 
3418 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3419 						     struct mlx5_pkt_reformat_params *params,
3420 						     enum mlx5_flow_namespace_type ns_type)
3421 {
3422 	struct mlx5_pkt_reformat *pkt_reformat;
3423 	struct mlx5_flow_root_namespace *root;
3424 	int err;
3425 
3426 	root = get_root_namespace(dev, ns_type);
3427 	if (!root)
3428 		return ERR_PTR(-EOPNOTSUPP);
3429 
3430 	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3431 	if (!pkt_reformat)
3432 		return ERR_PTR(-ENOMEM);
3433 
3434 	pkt_reformat->ns_type = ns_type;
3435 	pkt_reformat->reformat_type = params->type;
3436 	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3437 						pkt_reformat);
3438 	if (err) {
3439 		kfree(pkt_reformat);
3440 		return ERR_PTR(err);
3441 	}
3442 
3443 	return pkt_reformat;
3444 }
3445 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3446 
3447 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3448 				  struct mlx5_pkt_reformat *pkt_reformat)
3449 {
3450 	struct mlx5_flow_root_namespace *root;
3451 
3452 	root = get_root_namespace(dev, pkt_reformat->ns_type);
3453 	if (WARN_ON(!root))
3454 		return;
3455 	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3456 	kfree(pkt_reformat);
3457 }
3458 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3459 
3460 int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
3461 {
3462 	return definer->id;
3463 }
3464 
3465 struct mlx5_flow_definer *
3466 mlx5_create_match_definer(struct mlx5_core_dev *dev,
3467 			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
3468 			  u32 *match_mask)
3469 {
3470 	struct mlx5_flow_root_namespace *root;
3471 	struct mlx5_flow_definer *definer;
3472 	int id;
3473 
3474 	root = get_root_namespace(dev, ns_type);
3475 	if (!root)
3476 		return ERR_PTR(-EOPNOTSUPP);
3477 
3478 	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3479 	if (!definer)
3480 		return ERR_PTR(-ENOMEM);
3481 
3482 	definer->ns_type = ns_type;
3483 	id = root->cmds->create_match_definer(root, format_id, match_mask);
3484 	if (id < 0) {
3485 		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3486 		kfree(definer);
3487 		return ERR_PTR(id);
3488 	}
3489 	definer->id = id;
3490 	return definer;
3491 }
3492 
3493 void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3494 				struct mlx5_flow_definer *definer)
3495 {
3496 	struct mlx5_flow_root_namespace *root;
3497 
3498 	root = get_root_namespace(dev, definer->ns_type);
3499 	if (WARN_ON(!root))
3500 		return;
3501 
3502 	root->cmds->destroy_match_definer(root, definer->id);
3503 	kfree(definer);
3504 }
3505 
3506 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3507 				 struct mlx5_flow_root_namespace *peer_ns)
3508 {
3509 	if (peer_ns && ns->mode != peer_ns->mode) {
3510 		mlx5_core_err(ns->dev,
3511 			      "Can't peer namespace of different steering mode\n");
3512 		return -EINVAL;
3513 	}
3514 
3515 	return ns->cmds->set_peer(ns, peer_ns);
3516 }
3517 
3518 /* This function should be called only at init stage of the namespace.
3519  * It is not safe to call this function while steering operations
3520  * are executed in the namespace.
3521  */
3522 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3523 				 enum mlx5_flow_steering_mode mode)
3524 {
3525 	struct mlx5_flow_root_namespace *root;
3526 	const struct mlx5_flow_cmds *cmds;
3527 	int err;
3528 
3529 	root = find_root(&ns->node);
3530 	/* Can't set cmds on a non-root namespace */
3531 	if (&root->ns != ns)
3532 		return -EINVAL;
3533 
3534 	if (root->table_type != FS_FT_FDB)
3535 		return -EOPNOTSUPP;
3536 
3537 	if (root->mode == mode)
3538 		return 0;
3539 
3540 	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3541 		cmds = mlx5_fs_cmd_get_dr_cmds();
3542 	else
3543 		cmds = mlx5_fs_cmd_get_fw_cmds();
3544 	if (!cmds)
3545 		return -EOPNOTSUPP;
3546 
3547 	err = cmds->create_ns(root);
3548 	if (err) {
3549 		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3550 			      err);
3551 		return err;
3552 	}
3553 
3554 	root->cmds->destroy_ns(root);
3555 	root->cmds = cmds;
3556 	root->mode = mode;
3557 
3558 	return 0;
3559 }
3560