xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c (revision 44ad3baf1cca483e418b6aadf2d3994f69e0f16a)
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37 #include <net/devlink.h>
38 
39 #include "mlx5_core.h"
40 #include "fs_core.h"
41 #include "fs_cmd.h"
42 #include "fs_ft_pool.h"
43 #include "diag/fs_tracepoint.h"
44 #include "devlink.h"
45 
46 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 					 sizeof(struct init_tree_node))
48 
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 		 ...) {.type = FS_TYPE_PRIO,\
51 	.min_ft_level = min_level_val,\
52 	.num_levels = num_levels_val,\
53 	.num_leaf_prios = num_prios_val,\
54 	.caps = caps_val,\
55 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
56 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57 }
58 
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
61 		 __VA_ARGS__)\
62 
63 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
64 	.def_miss_action = def_miss_act,\
65 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
66 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67 }
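/* Editorial note (not part of the driver): the ar_size trick above relies on
 * a compound-literal array whose size is computed at compile time, e.g.
 *
 *   INIT_TREE_NODE_ARRAY_SIZE({ .prio = 0 }, { .prio = 1 }, { .prio = 2 })
 *
 * expands to
 *
 *   sizeof((struct init_tree_node[]){ {...}, {...}, {...} }) /
 *   sizeof(struct init_tree_node) == 3
 *
 * so ADD_PRIO()/ADD_NS() can record how many children were passed without an
 * explicit count argument.
 */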
68 
69 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 				   sizeof(long))
71 
72 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73 
74 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 			       .caps = (long[]) {__VA_ARGS__} }
76 
77 #define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81 
82 #define FS_CHAINING_CAPS_EGRESS                                                \
83 	FS_REQUIRED_CAPS(                                                      \
84 		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
85 		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
86 		FS_CAP(flow_table_properties_nic_transmit                      \
87 			       .identified_miss_table_mode),                   \
88 		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89 
90 #define FS_CHAINING_CAPS_RDMA_TX                                                \
91 	FS_REQUIRED_CAPS(                                                       \
92 		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
94 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
95 			       .identified_miss_table_mode),                    \
96 		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
97 			       .flow_table_modify))
98 
99 #define LEFTOVERS_NUM_LEVELS 1
100 #define LEFTOVERS_NUM_PRIOS 1
101 
102 #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
103 #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
104 
105 #define BY_PASS_PRIO_NUM_LEVELS 1
106 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
107 			   LEFTOVERS_NUM_PRIOS)
108 
109 #define KERNEL_RX_MACSEC_NUM_PRIOS  1
110 #define KERNEL_RX_MACSEC_NUM_LEVELS 3
111 #define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
112 
113 #define ETHTOOL_PRIO_NUM_LEVELS 1
114 #define ETHTOOL_NUM_PRIOS 11
115 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
116 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
117  * IPsec RoCE policy
118  */
119 #define KERNEL_NIC_PRIO_NUM_LEVELS 9
120 #define KERNEL_NIC_NUM_PRIOS 1
121 /* One more level for tc */
122 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
123 
124 #define KERNEL_NIC_TC_NUM_PRIOS  1
125 #define KERNEL_NIC_TC_NUM_LEVELS 3
126 
127 #define ANCHOR_NUM_LEVELS 1
128 #define ANCHOR_NUM_PRIOS 1
129 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
130 
131 #define OFFLOADS_MAX_FT 2
132 #define OFFLOADS_NUM_PRIOS 2
133 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
134 
135 #define LAG_PRIO_NUM_LEVELS 1
136 #define LAG_NUM_PRIOS 1
137 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
138 
139 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
140 #define KERNEL_TX_IPSEC_NUM_LEVELS 3
141 #define KERNEL_TX_IPSEC_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
142 
143 #define KERNEL_TX_MACSEC_NUM_PRIOS  1
144 #define KERNEL_TX_MACSEC_NUM_LEVELS 2
145 #define KERNEL_TX_MACSEC_MIN_LEVEL       (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
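/* Editorial note (illustration only): with the values defined in this file,
 * the minimum-level chain works out as, e.g.,
 *
 *   KERNEL_MIN_LEVEL  = KERNEL_NIC_PRIO_NUM_LEVELS + 1 = 9 + 1  = 10
 *   ETHTOOL_MIN_LEVEL = KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS = 10 + 11 = 21
 *   BY_PASS_MIN_LEVEL = ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +
 *                       LEFTOVERS_NUM_PRIOS
 *
 * (MLX5_BY_PASS_NUM_PRIOS is defined elsewhere in the driver, outside this
 * excerpt), i.e. each namespace reserves enough levels for the namespaces
 * that must be able to chain in front of it.
 */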
146 
147 struct node_caps {
148 	size_t	arr_sz;
149 	long	*caps;
150 };
151 
152 static struct init_tree_node {
153 	enum fs_node_type	type;
154 	struct init_tree_node *children;
155 	int ar_size;
156 	struct node_caps caps;
157 	int min_ft_level;
158 	int num_leaf_prios;
159 	int prio;
160 	int num_levels;
161 	enum mlx5_flow_table_miss_action def_miss_action;
162 } root_fs = {
163 	.type = FS_TYPE_NAMESPACE,
164 	.ar_size = 8,
165 	  .children = (struct init_tree_node[]){
166 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
167 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
168 				  ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
169 						    BY_PASS_PRIO_NUM_LEVELS))),
170 		  ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
171 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
172 				  ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
173 						    KERNEL_RX_MACSEC_NUM_LEVELS))),
174 		  ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
175 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
176 				  ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
177 						    LAG_PRIO_NUM_LEVELS))),
178 		  ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
179 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
180 				  ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
181 						    OFFLOADS_MAX_FT))),
182 		  ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
183 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
184 				  ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
185 						    ETHTOOL_PRIO_NUM_LEVELS))),
186 		  ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
187 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
188 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
189 						    KERNEL_NIC_TC_NUM_LEVELS),
190 				  ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
191 						    KERNEL_NIC_PRIO_NUM_LEVELS))),
192 		  ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
193 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
194 				  ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
195 						    LEFTOVERS_NUM_LEVELS))),
196 		  ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
197 			   ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
198 				  ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
199 						    ANCHOR_NUM_LEVELS))),
200 	}
201 };
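/* Editorial sketch (not part of the driver): roughly what the first
 * ADD_PRIO() entry above expands to, with the nested ADD_NS() and
 * ADD_MULTIPLE_PRIO() shown inline:
 *
 *   { .type = FS_TYPE_PRIO,
 *     .min_ft_level = BY_PASS_MIN_LEVEL,
 *     .caps = FS_CHAINING_CAPS,
 *     .ar_size = 1,
 *     .children = (struct init_tree_node[]){
 *       { .type = FS_TYPE_NAMESPACE,
 *         .def_miss_action = MLX5_FLOW_TABLE_MISS_ACTION_DEF,
 *         .ar_size = 1,
 *         .children = (struct init_tree_node[]){
 *           { .type = FS_TYPE_PRIO,
 *             .num_leaf_prios = MLX5_BY_PASS_NUM_PRIOS,
 *             .num_levels = BY_PASS_PRIO_NUM_LEVELS } } } } }
 *
 * i.e. a priority that is meant to be instantiated only when the device has
 * FS_CHAINING_CAPS and enough flow-table levels, wrapping one namespace of
 * MLX5_BY_PASS_NUM_PRIOS leaf priorities of one level each.
 */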
202 
203 static struct init_tree_node egress_root_fs = {
204 	.type = FS_TYPE_NAMESPACE,
205 	.ar_size = 3,
206 	.children = (struct init_tree_node[]) {
207 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
208 			 FS_CHAINING_CAPS_EGRESS,
209 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
210 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
211 						  BY_PASS_PRIO_NUM_LEVELS))),
212 		ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
213 			 FS_CHAINING_CAPS_EGRESS,
214 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
215 				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
216 						  KERNEL_TX_IPSEC_NUM_LEVELS))),
217 		ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
218 			 FS_CHAINING_CAPS_EGRESS,
219 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
220 				ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
221 						  KERNEL_TX_MACSEC_NUM_LEVELS))),
222 	}
223 };
224 
225 enum {
226 	RDMA_RX_IPSEC_PRIO,
227 	RDMA_RX_MACSEC_PRIO,
228 	RDMA_RX_COUNTERS_PRIO,
229 	RDMA_RX_BYPASS_PRIO,
230 	RDMA_RX_KERNEL_PRIO,
231 };
232 
233 #define RDMA_RX_IPSEC_NUM_PRIOS 1
234 #define RDMA_RX_IPSEC_NUM_LEVELS 2
235 #define RDMA_RX_IPSEC_MIN_LEVEL  (RDMA_RX_IPSEC_NUM_LEVELS)
236 
237 #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
238 #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
239 #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
240 
241 #define RDMA_RX_MACSEC_NUM_PRIOS 1
242 #define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2
243 #define RDMA_RX_MACSEC_MIN_LEVEL  (RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS)
244 
245 static struct init_tree_node rdma_rx_root_fs = {
246 	.type = FS_TYPE_NAMESPACE,
247 	.ar_size = 5,
248 	.children = (struct init_tree_node[]) {
249 		[RDMA_RX_IPSEC_PRIO] =
250 		ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
251 			 FS_CHAINING_CAPS,
252 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
253 				ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
254 						  RDMA_RX_IPSEC_NUM_LEVELS))),
255 		[RDMA_RX_MACSEC_PRIO] =
256 		ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0,
257 			 FS_CHAINING_CAPS,
258 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
259 				ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS,
260 						  RDMA_RX_MACSEC_PRIO_NUM_LEVELS))),
261 		[RDMA_RX_COUNTERS_PRIO] =
262 		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
263 			 FS_CHAINING_CAPS,
264 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
265 				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
266 						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
267 		[RDMA_RX_BYPASS_PRIO] =
268 		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
269 			 FS_CHAINING_CAPS,
270 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
271 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
272 						  BY_PASS_PRIO_NUM_LEVELS))),
273 		[RDMA_RX_KERNEL_PRIO] =
274 		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
275 			 FS_CHAINING_CAPS,
276 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
277 				ADD_MULTIPLE_PRIO(1, 1))),
278 	}
279 };
280 
281 enum {
282 	RDMA_TX_COUNTERS_PRIO,
283 	RDMA_TX_IPSEC_PRIO,
284 	RDMA_TX_MACSEC_PRIO,
285 	RDMA_TX_BYPASS_PRIO,
286 };
287 
288 #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
289 #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
290 
291 #define RDMA_TX_IPSEC_NUM_PRIOS 1
292 #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
293 #define RDMA_TX_IPSEC_MIN_LEVEL  (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
294 
295 #define RDMA_TX_MACSEC_NUM_PRIOS 1
296 #define RDMA_TX_MACESC_PRIO_NUM_LEVELS 1
297 #define RDMA_TX_MACSEC_MIN_LEVEL  (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS)
298 
299 static struct init_tree_node rdma_tx_root_fs = {
300 	.type = FS_TYPE_NAMESPACE,
301 	.ar_size = 4,
302 	.children = (struct init_tree_node[]) {
303 		[RDMA_TX_COUNTERS_PRIO] =
304 		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
305 			 FS_CHAINING_CAPS,
306 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
307 				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
308 						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
309 		[RDMA_TX_IPSEC_PRIO] =
310 		ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
311 			 FS_CHAINING_CAPS,
312 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
313 				ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
314 						  RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
315 		[RDMA_TX_MACSEC_PRIO] =
316 		ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0,
317 			 FS_CHAINING_CAPS,
318 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
319 				ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS,
320 						  RDMA_TX_MACESC_PRIO_NUM_LEVELS))),
321 		[RDMA_TX_BYPASS_PRIO] =
322 		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
323 			 FS_CHAINING_CAPS_RDMA_TX,
324 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
325 				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
326 						  BY_PASS_PRIO_NUM_LEVELS))),
327 	}
328 };
329 
330 enum fs_i_lock_class {
331 	FS_LOCK_GRANDPARENT,
332 	FS_LOCK_PARENT,
333 	FS_LOCK_CHILD
334 };
335 
336 static const struct rhashtable_params rhash_fte = {
337 	.key_len = sizeof_field(struct fs_fte, val),
338 	.key_offset = offsetof(struct fs_fte, val),
339 	.head_offset = offsetof(struct fs_fte, hash),
340 	.automatic_shrinking = true,
341 	.min_size = 1,
342 };
343 
344 static const struct rhashtable_params rhash_fg = {
345 	.key_len = sizeof_field(struct mlx5_flow_group, mask),
346 	.key_offset = offsetof(struct mlx5_flow_group, mask),
347 	.head_offset = offsetof(struct mlx5_flow_group, hash),
348 	.automatic_shrinking = true,
349 	.min_size = 1,
350 
351 };
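/* Editorial sketch (illustration only, error handling omitted): with these
 * params, an FTE is keyed by its raw match value and a flow group by its
 * mask, so a lookup goes straight from the key bytes to the object that
 * embeds the rhash_head, e.g.
 *
 *   struct fs_fte *fte;
 *
 *   fte = rhashtable_lookup_fast(&fg->ftes_hash, spec->match_value,
 *                                rhash_fte);
 *   if (fte)
 *           // reuse the existing entry instead of allocating a new one
 */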
352 
353 static void del_hw_flow_table(struct fs_node *node);
354 static void del_hw_flow_group(struct fs_node *node);
355 static void del_hw_fte(struct fs_node *node);
356 static void del_sw_flow_table(struct fs_node *node);
357 static void del_sw_flow_group(struct fs_node *node);
358 static void del_sw_fte(struct fs_node *node);
359 static void del_sw_prio(struct fs_node *node);
360 static void del_sw_ns(struct fs_node *node);
361 /* Deleting a rule (destination) is a special case that
362  * requires holding the FTE lock for the whole deletion process.
363  */
364 static void del_sw_hw_rule(struct fs_node *node);
365 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
366 				struct mlx5_flow_destination *d2);
367 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
368 static struct mlx5_flow_rule *
369 find_flow_rule(struct fs_fte *fte,
370 	       struct mlx5_flow_destination *dest);
371 
372 static void tree_init_node(struct fs_node *node,
373 			   void (*del_hw_func)(struct fs_node *),
374 			   void (*del_sw_func)(struct fs_node *))
375 {
376 	refcount_set(&node->refcount, 1);
377 	INIT_LIST_HEAD(&node->list);
378 	INIT_LIST_HEAD(&node->children);
379 	init_rwsem(&node->lock);
380 	node->del_hw_func = del_hw_func;
381 	node->del_sw_func = del_sw_func;
382 	node->active = false;
383 }
384 
385 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
386 {
387 	if (parent)
388 		refcount_inc(&parent->refcount);
389 	node->parent = parent;
390 
391 	/* No parent means this node is the root */
392 	if (!parent)
393 		node->root = node;
394 	else
395 		node->root = parent->root;
396 }
397 
398 static int tree_get_node(struct fs_node *node)
399 {
400 	return refcount_inc_not_zero(&node->refcount);
401 }
402 
403 static void nested_down_read_ref_node(struct fs_node *node,
404 				      enum fs_i_lock_class class)
405 {
406 	if (node) {
407 		down_read_nested(&node->lock, class);
408 		refcount_inc(&node->refcount);
409 	}
410 }
411 
412 static void nested_down_write_ref_node(struct fs_node *node,
413 				       enum fs_i_lock_class class)
414 {
415 	if (node) {
416 		down_write_nested(&node->lock, class);
417 		refcount_inc(&node->refcount);
418 	}
419 }
420 
421 static void down_write_ref_node(struct fs_node *node, bool locked)
422 {
423 	if (node) {
424 		if (!locked)
425 			down_write(&node->lock);
426 		refcount_inc(&node->refcount);
427 	}
428 }
429 
430 static void up_read_ref_node(struct fs_node *node)
431 {
432 	refcount_dec(&node->refcount);
433 	up_read(&node->lock);
434 }
435 
436 static void up_write_ref_node(struct fs_node *node, bool locked)
437 {
438 	refcount_dec(&node->refcount);
439 	if (!locked)
440 		up_write(&node->lock);
441 }
442 
443 static void tree_put_node(struct fs_node *node, bool locked)
444 {
445 	struct fs_node *parent_node = node->parent;
446 
447 	if (refcount_dec_and_test(&node->refcount)) {
448 		if (node->del_hw_func)
449 			node->del_hw_func(node);
450 		if (parent_node) {
451 			down_write_ref_node(parent_node, locked);
452 			list_del_init(&node->list);
453 		}
454 		node->del_sw_func(node);
455 		if (parent_node)
456 			up_write_ref_node(parent_node, locked);
457 		node = NULL;
458 	}
459 	if (!node && parent_node)
460 		tree_put_node(parent_node, locked);
461 }
462 
463 static int tree_remove_node(struct fs_node *node, bool locked)
464 {
465 	if (refcount_read(&node->refcount) > 1) {
466 		refcount_dec(&node->refcount);
467 		return -EEXIST;
468 	}
469 	tree_put_node(node, locked);
470 	return 0;
471 }
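/* Editorial sketch of the intended fs_node lifecycle (illustration only;
 * locking and error handling omitted, "obj" is a hypothetical object that
 * embeds an fs_node):
 *
 *   tree_init_node(&obj->node, del_hw_obj, del_sw_obj); // refcount = 1
 *   tree_add_node(&obj->node, &parent->node);  // take a ref on the parent
 *   ...
 *   if (tree_get_node(&obj->node)) {           // temporary reference
 *           use(obj);
 *           tree_put_node(&obj->node, false);
 *   }
 *   tree_put_node(&obj->node, false);  // last put: del_hw_func runs, the
 *                                      // node is unlinked from its parent,
 *                                      // del_sw_func frees it, then the
 *                                      // parent reference is dropped
 */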
472 
473 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
474 				 unsigned int prio)
475 {
476 	struct fs_prio *iter_prio;
477 
478 	fs_for_each_prio(iter_prio, ns) {
479 		if (iter_prio->prio == prio)
480 			return iter_prio;
481 	}
482 
483 	return NULL;
484 }
485 
486 static bool is_fwd_next_action(u32 action)
487 {
488 	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
489 			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
490 }
491 
492 static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
493 {
494 	return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
495 		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
496 		type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
497 		type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
498 		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
499 		type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
500 		type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
501 		type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
502 }
503 
504 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
505 {
506 	int i;
507 
508 	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
509 		if (spec->match_value[i] & ~spec->match_criteria[i]) {
510 			pr_warn("mlx5_core: match_value differs from match_criteria\n");
511 			return false;
512 		}
513 
514 	return true;
515 }
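/* Editorial example (illustration only): check_valid_spec() rejects specs
 * that set a value bit which is not covered by the criteria mask, e.g.
 *
 *   MLX5_SET(fte_match_param, spec->match_criteria,
 *            outer_headers.udp_dport, 0);          // field not masked
 *   MLX5_SET(fte_match_param, spec->match_value,
 *            outer_headers.udp_dport, 4789);       // ...but a value is set
 *
 * would trigger the pr_warn() above and make the function return false.
 */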
516 
517 struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
518 {
519 	struct fs_node *root;
520 	struct mlx5_flow_namespace *ns;
521 
522 	root = node->root;
523 
524 	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
525 		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
526 		return NULL;
527 	}
528 
529 	ns = container_of(root, struct mlx5_flow_namespace, node);
530 	return container_of(ns, struct mlx5_flow_root_namespace, ns);
531 }
532 
533 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
534 {
535 	struct mlx5_flow_root_namespace *root = find_root(node);
536 
537 	if (root)
538 		return root->dev->priv.steering;
539 	return NULL;
540 }
541 
542 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
543 {
544 	struct mlx5_flow_root_namespace *root = find_root(node);
545 
546 	if (root)
547 		return root->dev;
548 	return NULL;
549 }
550 
551 static void del_sw_ns(struct fs_node *node)
552 {
553 	kfree(node);
554 }
555 
556 static void del_sw_prio(struct fs_node *node)
557 {
558 	kfree(node);
559 }
560 
561 static void del_hw_flow_table(struct fs_node *node)
562 {
563 	struct mlx5_flow_root_namespace *root;
564 	struct mlx5_flow_table *ft;
565 	struct mlx5_core_dev *dev;
566 	int err;
567 
568 	fs_get_obj(ft, node);
569 	dev = get_dev(&ft->node);
570 	root = find_root(&ft->node);
571 	trace_mlx5_fs_del_ft(ft);
572 
573 	if (node->active) {
574 		err = root->cmds->destroy_flow_table(root, ft);
575 		if (err)
576 			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
577 	}
578 }
579 
580 static void del_sw_flow_table(struct fs_node *node)
581 {
582 	struct mlx5_flow_table *ft;
583 	struct fs_prio *prio;
584 
585 	fs_get_obj(ft, node);
586 
587 	rhltable_destroy(&ft->fgs_hash);
588 	if (ft->node.parent) {
589 		fs_get_obj(prio, ft->node.parent);
590 		prio->num_ft--;
591 	}
592 	kfree(ft);
593 }
594 
595 static void modify_fte(struct fs_fte *fte)
596 {
597 	struct mlx5_flow_root_namespace *root;
598 	struct mlx5_flow_table *ft;
599 	struct mlx5_flow_group *fg;
600 	struct mlx5_core_dev *dev;
601 	int err;
602 
603 	fs_get_obj(fg, fte->node.parent);
604 	fs_get_obj(ft, fg->node.parent);
605 	dev = get_dev(&fte->node);
606 
607 	root = find_root(&ft->node);
608 	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
609 	if (err)
610 		mlx5_core_warn(dev,
611 			       "%s can't del rule fg id=%d fte_index=%d\n",
612 			       __func__, fg->id, fte->index);
613 	fte->modify_mask = 0;
614 }
615 
616 static void del_sw_hw_rule(struct fs_node *node)
617 {
618 	struct mlx5_flow_rule *rule;
619 	struct fs_fte *fte;
620 
621 	fs_get_obj(rule, node);
622 	fs_get_obj(fte, rule->node.parent);
623 	trace_mlx5_fs_del_rule(rule);
624 	if (is_fwd_next_action(rule->sw_action)) {
625 		mutex_lock(&rule->dest_attr.ft->lock);
626 		list_del(&rule->next_ft);
627 		mutex_unlock(&rule->dest_attr.ft->lock);
628 	}
629 
630 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
631 		--fte->dests_size;
632 		fte->modify_mask |=
633 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
634 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
635 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
636 		goto out;
637 	}
638 
639 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
640 		--fte->dests_size;
641 		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
642 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
643 		goto out;
644 	}
645 
646 	if (is_fwd_dest_type(rule->dest_attr.type)) {
647 		--fte->dests_size;
648 		--fte->fwd_dests;
649 
650 		if (!fte->fwd_dests)
651 			fte->action.action &=
652 				~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
653 		fte->modify_mask |=
654 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
655 		goto out;
656 	}
657 out:
658 	kfree(rule);
659 }
660 
661 static void del_hw_fte(struct fs_node *node)
662 {
663 	struct mlx5_flow_root_namespace *root;
664 	struct mlx5_flow_table *ft;
665 	struct mlx5_flow_group *fg;
666 	struct mlx5_core_dev *dev;
667 	struct fs_fte *fte;
668 	int err;
669 
670 	fs_get_obj(fte, node);
671 	fs_get_obj(fg, fte->node.parent);
672 	fs_get_obj(ft, fg->node.parent);
673 
674 	trace_mlx5_fs_del_fte(fte);
675 	WARN_ON(fte->dests_size);
676 	dev = get_dev(&ft->node);
677 	root = find_root(&ft->node);
678 	if (node->active) {
679 		err = root->cmds->delete_fte(root, ft, fte);
680 		if (err)
681 			mlx5_core_warn(dev,
682 				       "flow steering can't delete fte in index %d of flow group id %d\n",
683 				       fte->index, fg->id);
684 		node->active = false;
685 	}
686 }
687 
688 static void del_sw_fte(struct fs_node *node)
689 {
690 	struct mlx5_flow_steering *steering = get_steering(node);
691 	struct mlx5_flow_group *fg;
692 	struct fs_fte *fte;
693 	int err;
694 
695 	fs_get_obj(fte, node);
696 	fs_get_obj(fg, fte->node.parent);
697 
698 	err = rhashtable_remove_fast(&fg->ftes_hash,
699 				     &fte->hash,
700 				     rhash_fte);
701 	WARN_ON(err);
702 	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
703 	kmem_cache_free(steering->ftes_cache, fte);
704 }
705 
706 static void del_hw_flow_group(struct fs_node *node)
707 {
708 	struct mlx5_flow_root_namespace *root;
709 	struct mlx5_flow_group *fg;
710 	struct mlx5_flow_table *ft;
711 	struct mlx5_core_dev *dev;
712 
713 	fs_get_obj(fg, node);
714 	fs_get_obj(ft, fg->node.parent);
715 	dev = get_dev(&ft->node);
716 	trace_mlx5_fs_del_fg(fg);
717 
718 	root = find_root(&ft->node);
719 	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
720 		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
721 			       fg->id, ft->id);
722 }
723 
724 static void del_sw_flow_group(struct fs_node *node)
725 {
726 	struct mlx5_flow_steering *steering = get_steering(node);
727 	struct mlx5_flow_group *fg;
728 	struct mlx5_flow_table *ft;
729 	int err;
730 
731 	fs_get_obj(fg, node);
732 	fs_get_obj(ft, fg->node.parent);
733 
734 	rhashtable_destroy(&fg->ftes_hash);
735 	ida_destroy(&fg->fte_allocator);
736 	if (ft->autogroup.active &&
737 	    fg->max_ftes == ft->autogroup.group_size &&
738 	    fg->start_index < ft->autogroup.max_fte)
739 		ft->autogroup.num_groups--;
740 	err = rhltable_remove(&ft->fgs_hash,
741 			      &fg->hash,
742 			      rhash_fg);
743 	WARN_ON(err);
744 	kmem_cache_free(steering->fgs_cache, fg);
745 }
746 
747 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
748 {
749 	int index;
750 	int ret;
751 
752 	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
753 	if (index < 0)
754 		return index;
755 
756 	fte->index = index + fg->start_index;
757 	ret = rhashtable_insert_fast(&fg->ftes_hash,
758 				     &fte->hash,
759 				     rhash_fte);
760 	if (ret)
761 		goto err_ida_remove;
762 
763 	tree_add_node(&fte->node, &fg->node);
764 	list_add_tail(&fte->node.list, &fg->node.children);
765 	return 0;
766 
767 err_ida_remove:
768 	ida_free(&fg->fte_allocator, index);
769 	return ret;
770 }
771 
772 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
773 				const struct mlx5_flow_spec *spec,
774 				struct mlx5_flow_act *flow_act)
775 {
776 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
777 	struct fs_fte *fte;
778 
779 	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
780 	if (!fte)
781 		return ERR_PTR(-ENOMEM);
782 
783 	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
784 	fte->node.type =  FS_TYPE_FLOW_ENTRY;
785 	fte->action = *flow_act;
786 	fte->flow_context = spec->flow_context;
787 
788 	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
789 
790 	return fte;
791 }
792 
793 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
794 			       struct mlx5_flow_group *fg)
795 {
796 	rhashtable_destroy(&fg->ftes_hash);
797 	kmem_cache_free(steering->fgs_cache, fg);
798 }
799 
800 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
801 						u8 match_criteria_enable,
802 						const void *match_criteria,
803 						int start_index,
804 						int end_index)
805 {
806 	struct mlx5_flow_group *fg;
807 	int ret;
808 
809 	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
810 	if (!fg)
811 		return ERR_PTR(-ENOMEM);
812 
813 	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
814 	if (ret) {
815 		kmem_cache_free(steering->fgs_cache, fg);
816 		return ERR_PTR(ret);
817 	}
818 
819 	ida_init(&fg->fte_allocator);
820 	fg->mask.match_criteria_enable = match_criteria_enable;
821 	memcpy(&fg->mask.match_criteria, match_criteria,
822 	       sizeof(fg->mask.match_criteria));
823 	fg->node.type =  FS_TYPE_FLOW_GROUP;
824 	fg->start_index = start_index;
825 	fg->max_ftes = end_index - start_index + 1;
826 
827 	return fg;
828 }
829 
830 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
831 						       u8 match_criteria_enable,
832 						       const void *match_criteria,
833 						       int start_index,
834 						       int end_index,
835 						       struct list_head *prev)
836 {
837 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
838 	struct mlx5_flow_group *fg;
839 	int ret;
840 
841 	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
842 			      start_index, end_index);
843 	if (IS_ERR(fg))
844 		return fg;
845 
846 	/* initialize refcnt, add to parent list */
847 	ret = rhltable_insert(&ft->fgs_hash,
848 			      &fg->hash,
849 			      rhash_fg);
850 	if (ret) {
851 		dealloc_flow_group(steering, fg);
852 		return ERR_PTR(ret);
853 	}
854 
855 	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
856 	tree_add_node(&fg->node, &ft->node);
857 	/* Add node to group list */
858 	list_add(&fg->node.list, prev);
859 	atomic_inc(&ft->node.version);
860 
861 	return fg;
862 }
863 
864 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
865 						enum fs_flow_table_type table_type,
866 						enum fs_flow_table_op_mod op_mod,
867 						u32 flags)
868 {
869 	struct mlx5_flow_table *ft;
870 	int ret;
871 
872 	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
873 	if (!ft)
874 		return ERR_PTR(-ENOMEM);
875 
876 	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
877 	if (ret) {
878 		kfree(ft);
879 		return ERR_PTR(ret);
880 	}
881 
882 	ft->level = level;
883 	ft->node.type = FS_TYPE_FLOW_TABLE;
884 	ft->op_mod = op_mod;
885 	ft->type = table_type;
886 	ft->vport = vport;
887 	ft->flags = flags;
888 	INIT_LIST_HEAD(&ft->fwd_rules);
889 	mutex_init(&ft->lock);
890 
891 	return ft;
892 }
893 
894 /* If reverse is false, we search for the first flow table in the root
895  * sub-tree after start (closest from the right); otherwise we search for
896  * the last flow table in the root sub-tree up to start (closest from the left).
897  */
898 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
899 							 struct list_head *start,
900 							 bool reverse)
901 {
902 #define list_advance_entry(pos, reverse)		\
903 	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
904 
905 #define list_for_each_advance_continue(pos, head, reverse)	\
906 	for (pos = list_advance_entry(pos, reverse);		\
907 	     &pos->list != (head);				\
908 	     pos = list_advance_entry(pos, reverse))
909 
910 	struct fs_node *iter = list_entry(start, struct fs_node, list);
911 	struct mlx5_flow_table *ft = NULL;
912 
913 	if (!root)
914 		return NULL;
915 
916 	list_for_each_advance_continue(iter, &root->children, reverse) {
917 		if (iter->type == FS_TYPE_FLOW_TABLE) {
918 			fs_get_obj(ft, iter);
919 			return ft;
920 		}
921 		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
922 		if (ft)
923 			return ft;
924 	}
925 
926 	return ft;
927 }
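/* Editorial example (illustration only): for a prio whose children are
 * [FT_a, sub_prio(FT_b), FT_c], calling
 * find_closest_ft_recursive(prio, &FT_a->node.list, false) walks forward,
 * descends into sub_prio and returns FT_b, while calling it with
 * &FT_c->node.list and reverse == true walks backward and also returns
 * FT_b (the last table before FT_c).
 */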
928 
929 static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
930 					       struct fs_node **child)
931 {
932 	struct fs_node *node = NULL;
933 
934 	while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
935 		node = parent;
936 		parent = parent->parent;
937 	}
938 
939 	if (child)
940 		*child = node;
941 
942 	return parent;
943 }
944 
945 /* If reverse is false then return the first flow table next to the passed node
946  * in the tree, else return the last flow table before the node in the tree.
947  * If skip is true, skip the flow tables in the same prio_chains prio.
948  */
949 static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
950 					       bool skip)
951 {
952 	struct fs_node *prio_chains_parent = NULL;
953 	struct mlx5_flow_table *ft = NULL;
954 	struct fs_node *curr_node;
955 	struct fs_node *parent;
956 
957 	if (skip)
958 		prio_chains_parent = find_prio_chains_parent(node, NULL);
959 	parent = node->parent;
960 	curr_node = node;
961 	while (!ft && parent) {
962 		if (parent != prio_chains_parent)
963 			ft = find_closest_ft_recursive(parent, &curr_node->list,
964 						       reverse);
965 		curr_node = parent;
966 		parent = curr_node->parent;
967 	}
968 	return ft;
969 }
970 
971 /* Assumes the whole tree is locked by the chain lock mutex */
972 static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
973 {
974 	return find_closest_ft(node, false, true);
975 }
976 
977 /* Assumes the whole tree is locked by the chain lock mutex */
978 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
979 {
980 	return find_closest_ft(node, true, true);
981 }
982 
983 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
984 						struct mlx5_flow_act *flow_act)
985 {
986 	struct fs_prio *prio;
987 	bool next_ns;
988 
989 	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
990 	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
991 
992 	return find_next_chained_ft(&prio->node);
993 }
994 
995 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
996 			       struct fs_prio *prio,
997 			       struct mlx5_flow_table *ft)
998 {
999 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
1000 	struct mlx5_flow_table *iter;
1001 	int err;
1002 
1003 	fs_for_each_ft(iter, prio) {
1004 		err = root->cmds->modify_flow_table(root, iter, ft);
1005 		if (err) {
1006 			mlx5_core_err(dev,
1007 				      "Failed to modify flow table id %d, type %d, err %d\n",
1008 				      iter->id, iter->type, err);
1009 			/* The driver is out of sync with the FW */
1010 			return err;
1011 		}
1012 	}
1013 	return 0;
1014 }
1015 
1016 static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
1017 							  struct fs_node *parent,
1018 							  struct fs_node **child,
1019 							  bool reverse)
1020 {
1021 	struct mlx5_flow_table *ft;
1022 
1023 	ft = find_closest_ft(node, reverse, false);
1024 
1025 	if (ft && parent == find_prio_chains_parent(&ft->node, child))
1026 		return ft;
1027 
1028 	return NULL;
1029 }
1030 
1031 /* Connect flow tables from previous priority of prio to ft */
1032 static int connect_prev_fts(struct mlx5_core_dev *dev,
1033 			    struct mlx5_flow_table *ft,
1034 			    struct fs_prio *prio)
1035 {
1036 	struct fs_node *prio_parent, *parent = NULL, *child, *node;
1037 	struct mlx5_flow_table *prev_ft;
1038 	int err = 0;
1039 
1040 	prio_parent = find_prio_chains_parent(&prio->node, &child);
1041 
1042 	/* return directly if not under the first sub ns of prio_chains prio */
1043 	if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
1044 		return 0;
1045 
1046 	prev_ft = find_prev_chained_ft(&prio->node);
1047 	while (prev_ft) {
1048 		struct fs_prio *prev_prio;
1049 
1050 		fs_get_obj(prev_prio, prev_ft->node.parent);
1051 		err = connect_fts_in_prio(dev, prev_prio, ft);
1052 		if (err)
1053 			break;
1054 
1055 		if (!parent) {
1056 			parent = find_prio_chains_parent(&prev_prio->node, &child);
1057 			if (!parent)
1058 				break;
1059 		}
1060 
1061 		node = child;
1062 		prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
1063 	}
1064 	return err;
1065 }
1066 
1067 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
1068 				 *prio)
1069 {
1070 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
1071 	struct mlx5_ft_underlay_qp *uqp;
1072 	int min_level = INT_MAX;
1073 	int err = 0;
1074 	u32 qpn;
1075 
1076 	if (root->root_ft)
1077 		min_level = root->root_ft->level;
1078 
1079 	if (ft->level >= min_level)
1080 		return 0;
1081 
1082 	if (list_empty(&root->underlay_qpns)) {
1083 		/* Don't set any QPN (use zero) when the QPN list is empty */
1084 		qpn = 0;
1085 		err = root->cmds->update_root_ft(root, ft, qpn, false);
1086 	} else {
1087 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
1088 			qpn = uqp->qpn;
1089 			err = root->cmds->update_root_ft(root, ft,
1090 							 qpn, false);
1091 			if (err)
1092 				break;
1093 		}
1094 	}
1095 
1096 	if (err)
1097 		mlx5_core_warn(root->dev,
1098 			       "Update root flow table of id(%u) qpn(%d) failed\n",
1099 			       ft->id, qpn);
1100 	else
1101 		root->root_ft = ft;
1102 
1103 	return err;
1104 }
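/* Editorial example (illustration only): if the current root_ft sits at
 * level 3 and a new table is created at level 1, update_root_ft_create()
 * issues update_root_ft so the hardware starts processing from the new,
 * lower-level table; creating a table at level 5 instead leaves the root
 * unchanged because ft->level >= min_level.
 */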
1105 
1106 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
1107 					 struct mlx5_flow_destination *dest)
1108 {
1109 	struct mlx5_flow_root_namespace *root;
1110 	struct mlx5_flow_table *ft;
1111 	struct mlx5_flow_group *fg;
1112 	struct fs_fte *fte;
1113 	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1114 	int err = 0;
1115 
1116 	fs_get_obj(fte, rule->node.parent);
1117 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1118 		return -EINVAL;
1119 	down_write_ref_node(&fte->node, false);
1120 	fs_get_obj(fg, fte->node.parent);
1121 	fs_get_obj(ft, fg->node.parent);
1122 
1123 	memcpy(&rule->dest_attr, dest, sizeof(*dest));
1124 	root = find_root(&ft->node);
1125 	err = root->cmds->update_fte(root, ft, fg,
1126 				     modify_mask, fte);
1127 	up_write_ref_node(&fte->node, false);
1128 
1129 	return err;
1130 }
1131 
1132 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1133 				 struct mlx5_flow_destination *new_dest,
1134 				 struct mlx5_flow_destination *old_dest)
1135 {
1136 	int i;
1137 
1138 	if (!old_dest) {
1139 		if (handle->num_rules != 1)
1140 			return -EINVAL;
1141 		return _mlx5_modify_rule_destination(handle->rule[0],
1142 						     new_dest);
1143 	}
1144 
1145 	for (i = 0; i < handle->num_rules; i++) {
1146 		if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
1147 			return _mlx5_modify_rule_destination(handle->rule[i],
1148 							     new_dest);
1149 	}
1150 
1151 	return -EINVAL;
1152 }
1153 
1154 /* Modify/set FWD rules that point at old_next_ft to point at new_next_ft */
1155 static int connect_fwd_rules(struct mlx5_core_dev *dev,
1156 			     struct mlx5_flow_table *new_next_ft,
1157 			     struct mlx5_flow_table *old_next_ft)
1158 {
1159 	struct mlx5_flow_destination dest = {};
1160 	struct mlx5_flow_rule *iter;
1161 	int err = 0;
1162 
1163 	/* new_next_ft and old_next_ft could be NULL only
1164 	 * when we create/destroy the anchor flow table.
1165 	 */
1166 	if (!new_next_ft || !old_next_ft)
1167 		return 0;
1168 
1169 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1170 	dest.ft = new_next_ft;
1171 
1172 	mutex_lock(&old_next_ft->lock);
1173 	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1174 	mutex_unlock(&old_next_ft->lock);
1175 	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1176 		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1177 		    iter->ft->ns == new_next_ft->ns)
1178 			continue;
1179 
1180 		err = _mlx5_modify_rule_destination(iter, &dest);
1181 		if (err)
1182 			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1183 			       new_next_ft->id);
1184 	}
1185 	return 0;
1186 }
1187 
1188 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1189 			      struct fs_prio *prio)
1190 {
1191 	struct mlx5_flow_table *next_ft, *first_ft;
1192 	int err = 0;
1193 
1194 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1195 
1196 	first_ft = list_first_entry_or_null(&prio->node.children,
1197 					    struct mlx5_flow_table, node.list);
1198 	if (!first_ft || first_ft->level > ft->level) {
1199 		err = connect_prev_fts(dev, ft, prio);
1200 		if (err)
1201 			return err;
1202 
1203 		next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
1204 		err = connect_fwd_rules(dev, ft, next_ft);
1205 		if (err)
1206 			return err;
1207 	}
1208 
1209 	if (MLX5_CAP_FLOWTABLE(dev,
1210 			       flow_table_properties_nic_receive.modify_root))
1211 		err = update_root_ft_create(ft, prio);
1212 	return err;
1213 }
1214 
1215 static void list_add_flow_table(struct mlx5_flow_table *ft,
1216 				struct fs_prio *prio)
1217 {
1218 	struct list_head *prev = &prio->node.children;
1219 	struct mlx5_flow_table *iter;
1220 
1221 	fs_for_each_ft(iter, prio) {
1222 		if (iter->level > ft->level)
1223 			break;
1224 		prev = &iter->node.list;
1225 	}
1226 	list_add(&ft->node.list, prev);
1227 }
1228 
1229 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1230 							struct mlx5_flow_table_attr *ft_attr,
1231 							enum fs_flow_table_op_mod op_mod,
1232 							u16 vport)
1233 {
1234 	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1235 	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1236 	struct mlx5_flow_table *next_ft;
1237 	struct fs_prio *fs_prio = NULL;
1238 	struct mlx5_flow_table *ft;
1239 	int err;
1240 
1241 	if (!root) {
1242 		pr_err("mlx5: flow steering failed to find root of namespace\n");
1243 		return ERR_PTR(-ENODEV);
1244 	}
1245 
1246 	mutex_lock(&root->chain_lock);
1247 	fs_prio = find_prio(ns, ft_attr->prio);
1248 	if (!fs_prio) {
1249 		err = -EINVAL;
1250 		goto unlock_root;
1251 	}
1252 	if (!unmanaged) {
1253 		/* The level is related to the
1254 		 * priority level range.
1255 		 */
1256 		if (ft_attr->level >= fs_prio->num_levels) {
1257 			err = -ENOSPC;
1258 			goto unlock_root;
1259 		}
1260 
1261 		ft_attr->level += fs_prio->start_level;
1262 	}
1263 
1264 	/* The level is related to the
1265 	 * priority level range.
1266 	 */
1267 	ft = alloc_flow_table(ft_attr->level,
1268 			      vport,
1269 			      root->table_type,
1270 			      op_mod, ft_attr->flags);
1271 	if (IS_ERR(ft)) {
1272 		err = PTR_ERR(ft);
1273 		goto unlock_root;
1274 	}
1275 
1276 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1277 	next_ft = unmanaged ? ft_attr->next_ft :
1278 			      find_next_chained_ft(&fs_prio->node);
1279 	ft->def_miss_action = ns->def_miss_action;
1280 	ft->ns = ns;
1281 	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
1282 	if (err)
1283 		goto free_ft;
1284 
1285 	if (!unmanaged) {
1286 		err = connect_flow_table(root->dev, ft, fs_prio);
1287 		if (err)
1288 			goto destroy_ft;
1289 	}
1290 
1291 	ft->node.active = true;
1292 	down_write_ref_node(&fs_prio->node, false);
1293 	if (!unmanaged) {
1294 		tree_add_node(&ft->node, &fs_prio->node);
1295 		list_add_flow_table(ft, fs_prio);
1296 	} else {
1297 		ft->node.root = fs_prio->node.root;
1298 	}
1299 	fs_prio->num_ft++;
1300 	up_write_ref_node(&fs_prio->node, false);
1301 	mutex_unlock(&root->chain_lock);
1302 	trace_mlx5_fs_add_ft(ft);
1303 	return ft;
1304 destroy_ft:
1305 	root->cmds->destroy_flow_table(root, ft);
1306 free_ft:
1307 	rhltable_destroy(&ft->fgs_hash);
1308 	kfree(ft);
1309 unlock_root:
1310 	mutex_unlock(&root->chain_lock);
1311 	return ERR_PTR(err);
1312 }
1313 
1314 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1315 					       struct mlx5_flow_table_attr *ft_attr)
1316 {
1317 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1318 }
1319 EXPORT_SYMBOL(mlx5_create_flow_table);
1320 
1321 u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
1322 {
1323 	return ft->id;
1324 }
1325 EXPORT_SYMBOL(mlx5_flow_table_id);
1326 
1327 struct mlx5_flow_table *
1328 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1329 			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
1330 {
1331 	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1332 }
1333 
1334 struct mlx5_flow_table*
1335 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1336 				 int prio, u32 level)
1337 {
1338 	struct mlx5_flow_table_attr ft_attr = {};
1339 
1340 	ft_attr.level = level;
1341 	ft_attr.prio  = prio;
1342 	ft_attr.max_fte = 1;
1343 
1344 	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1345 }
1346 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1347 
1348 #define MAX_FLOW_GROUP_SIZE BIT(24)
1349 struct mlx5_flow_table*
1350 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1351 				    struct mlx5_flow_table_attr *ft_attr)
1352 {
1353 	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1354 	int max_num_groups = ft_attr->autogroup.max_num_groups;
1355 	struct mlx5_flow_table *ft;
1356 	int autogroups_max_fte;
1357 
1358 	ft = mlx5_create_flow_table(ns, ft_attr);
1359 	if (IS_ERR(ft))
1360 		return ft;
1361 
1362 	autogroups_max_fte = ft->max_fte - num_reserved_entries;
1363 	if (max_num_groups > autogroups_max_fte)
1364 		goto err_validate;
1365 	if (num_reserved_entries > ft->max_fte)
1366 		goto err_validate;
1367 
1368 	/* Align the number of groups according to the largest group size */
1369 	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1370 		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1371 
1372 	ft->autogroup.active = true;
1373 	ft->autogroup.required_groups = max_num_groups;
1374 	ft->autogroup.max_fte = autogroups_max_fte;
1375 	/* We reserve space for flow groups in addition to the max types */
1376 	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1377 
1378 	return ft;
1379 
1380 err_validate:
1381 	mlx5_destroy_flow_table(ft);
1382 	return ERR_PTR(-ENOSPC);
1383 }
1384 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
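/* Editorial example (hypothetical numbers, illustration only): if the
 * created table ends up with ft->max_fte == 4096, num_reserved_entries == 0
 * and max_num_groups == 3, then autogroups_max_fte == 4096 and
 * ft->autogroup.group_size == 4096 / (3 + 1) == 1024, so roughly one extra
 * group's worth of entries remains for FTEs whose match criteria do not fit
 * any of the required groups.
 */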
1385 
1386 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1387 					       u32 *fg_in)
1388 {
1389 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1390 	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1391 					    fg_in, match_criteria);
1392 	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1393 					    fg_in,
1394 					    match_criteria_enable);
1395 	int start_index = MLX5_GET(create_flow_group_in, fg_in,
1396 				   start_flow_index);
1397 	int end_index = MLX5_GET(create_flow_group_in, fg_in,
1398 				 end_flow_index);
1399 	struct mlx5_flow_group *fg;
1400 	int err;
1401 
1402 	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1403 		return ERR_PTR(-EPERM);
1404 
1405 	down_write_ref_node(&ft->node, false);
1406 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1407 				     start_index, end_index,
1408 				     ft->node.children.prev);
1409 	up_write_ref_node(&ft->node, false);
1410 	if (IS_ERR(fg))
1411 		return fg;
1412 
1413 	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1414 	if (err) {
1415 		tree_put_node(&fg->node, false);
1416 		return ERR_PTR(err);
1417 	}
1418 	trace_mlx5_fs_add_fg(fg);
1419 	fg->node.active = true;
1420 
1421 	return fg;
1422 }
1423 EXPORT_SYMBOL(mlx5_create_flow_group);
1424 
1425 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1426 {
1427 	struct mlx5_flow_rule *rule;
1428 
1429 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1430 	if (!rule)
1431 		return NULL;
1432 
1433 	INIT_LIST_HEAD(&rule->next_ft);
1434 	rule->node.type = FS_TYPE_FLOW_DEST;
1435 	if (dest)
1436 		memcpy(&rule->dest_attr, dest, sizeof(*dest));
1437 	else
1438 		rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
1439 
1440 	return rule;
1441 }
1442 
1443 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1444 {
1445 	struct mlx5_flow_handle *handle;
1446 
1447 	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1448 	if (!handle)
1449 		return NULL;
1450 
1451 	handle->num_rules = num_rules;
1452 
1453 	return handle;
1454 }
1455 
1456 static void destroy_flow_handle(struct fs_fte *fte,
1457 				struct mlx5_flow_handle *handle,
1458 				struct mlx5_flow_destination *dest,
1459 				int i)
1460 {
1461 	for (; --i >= 0;) {
1462 		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1463 			fte->dests_size--;
1464 			list_del(&handle->rule[i]->node.list);
1465 			kfree(handle->rule[i]);
1466 		}
1467 	}
1468 	kfree(handle);
1469 }
1470 
1471 static struct mlx5_flow_handle *
1472 create_flow_handle(struct fs_fte *fte,
1473 		   struct mlx5_flow_destination *dest,
1474 		   int dest_num,
1475 		   int *modify_mask,
1476 		   bool *new_rule)
1477 {
1478 	struct mlx5_flow_handle *handle;
1479 	struct mlx5_flow_rule *rule = NULL;
1480 	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1481 	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1482 	int type;
1483 	int i = 0;
1484 
1485 	handle = alloc_handle((dest_num) ? dest_num : 1);
1486 	if (!handle)
1487 		return ERR_PTR(-ENOMEM);
1488 
1489 	do {
1490 		if (dest) {
1491 			rule = find_flow_rule(fte, dest + i);
1492 			if (rule) {
1493 				refcount_inc(&rule->node.refcount);
1494 				goto rule_found;
1495 			}
1496 		}
1497 
1498 		*new_rule = true;
1499 		rule = alloc_rule(dest + i);
1500 		if (!rule)
1501 			goto free_rules;
1502 
1503 		/* Add dest to the dests list - flow tables must be at the
1504 		 * end of the list so that forward-to-next-prio rules work.
1505 		 */
1506 		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1507 		if (dest &&
1508 		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1509 			list_add(&rule->node.list, &fte->node.children);
1510 		else
1511 			list_add_tail(&rule->node.list, &fte->node.children);
1512 		if (dest) {
1513 			fte->dests_size++;
1514 
1515 			if (is_fwd_dest_type(dest[i].type))
1516 				fte->fwd_dests++;
1517 
1518 			type = dest[i].type ==
1519 				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1520 			*modify_mask |= type ? count : dst;
1521 		}
1522 rule_found:
1523 		handle->rule[i] = rule;
1524 	} while (++i < dest_num);
1525 
1526 	return handle;
1527 
1528 free_rules:
1529 	destroy_flow_handle(fte, handle, dest, i);
1530 	return ERR_PTR(-ENOMEM);
1531 }
1532 
1533 /* fte should not be deleted while calling this function */
1534 static struct mlx5_flow_handle *
1535 add_rule_fte(struct fs_fte *fte,
1536 	     struct mlx5_flow_group *fg,
1537 	     struct mlx5_flow_destination *dest,
1538 	     int dest_num,
1539 	     bool update_action)
1540 {
1541 	struct mlx5_flow_root_namespace *root;
1542 	struct mlx5_flow_handle *handle;
1543 	struct mlx5_flow_table *ft;
1544 	int modify_mask = 0;
1545 	int err;
1546 	bool new_rule = false;
1547 
1548 	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1549 				    &new_rule);
1550 	if (IS_ERR(handle) || !new_rule)
1551 		goto out;
1552 
1553 	if (update_action)
1554 		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1555 
1556 	fs_get_obj(ft, fg->node.parent);
1557 	root = find_root(&fg->node);
1558 	if (!(fte->status & FS_FTE_STATUS_EXISTING))
1559 		err = root->cmds->create_fte(root, ft, fg, fte);
1560 	else
1561 		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1562 	if (err)
1563 		goto free_handle;
1564 
1565 	fte->node.active = true;
1566 	fte->status |= FS_FTE_STATUS_EXISTING;
1567 	atomic_inc(&fg->node.version);
1568 
1569 out:
1570 	return handle;
1571 
1572 free_handle:
1573 	destroy_flow_handle(fte, handle, dest, handle->num_rules);
1574 	return ERR_PTR(err);
1575 }
1576 
1577 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
1578 						     const struct mlx5_flow_spec *spec)
1579 {
1580 	struct list_head *prev = &ft->node.children;
1581 	u32 max_fte = ft->autogroup.max_fte;
1582 	unsigned int candidate_index = 0;
1583 	unsigned int group_size = 0;
1584 	struct mlx5_flow_group *fg;
1585 
1586 	if (!ft->autogroup.active)
1587 		return ERR_PTR(-ENOENT);
1588 
1589 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1590 		group_size = ft->autogroup.group_size;
1591 
1592 	/*  max_fte == ft->autogroup.max_types */
1593 	if (group_size == 0)
1594 		group_size = 1;
1595 
1596 	/* sorted by start_index */
1597 	fs_for_each_fg(fg, ft) {
1598 		if (candidate_index + group_size > fg->start_index)
1599 			candidate_index = fg->start_index + fg->max_ftes;
1600 		else
1601 			break;
1602 		prev = &fg->node.list;
1603 	}
1604 
1605 	if (candidate_index + group_size > max_fte)
1606 		return ERR_PTR(-ENOSPC);
1607 
1608 	fg = alloc_insert_flow_group(ft,
1609 				     spec->match_criteria_enable,
1610 				     spec->match_criteria,
1611 				     candidate_index,
1612 				     candidate_index + group_size - 1,
1613 				     prev);
1614 	if (IS_ERR(fg))
1615 		goto out;
1616 
1617 	if (group_size == ft->autogroup.group_size)
1618 		ft->autogroup.num_groups++;
1619 
1620 out:
1621 	return fg;
1622 }
1623 
1624 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1625 				  struct mlx5_flow_group *fg)
1626 {
1627 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1628 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1629 	void *match_criteria_addr;
1630 	u8 src_esw_owner_mask_on;
1631 	void *misc;
1632 	int err;
1633 	u32 *in;
1634 
1635 	in = kvzalloc(inlen, GFP_KERNEL);
1636 	if (!in)
1637 		return -ENOMEM;
1638 
1639 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1640 		 fg->mask.match_criteria_enable);
1641 	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1642 	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
1643 		 fg->max_ftes - 1);
1644 
1645 	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1646 			    misc_parameters);
1647 	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1648 					 source_eswitch_owner_vhca_id);
1649 	MLX5_SET(create_flow_group_in, in,
1650 		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1651 
1652 	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1653 					   in, match_criteria);
1654 	memcpy(match_criteria_addr, fg->mask.match_criteria,
1655 	       sizeof(fg->mask.match_criteria));
1656 
1657 	err = root->cmds->create_flow_group(root, ft, in, fg);
1658 	if (!err) {
1659 		fg->node.active = true;
1660 		trace_mlx5_fs_add_fg(fg);
1661 	}
1662 
1663 	kvfree(in);
1664 	return err;
1665 }
1666 
1667 static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
1668 				  struct mlx5_pkt_reformat *p2)
1669 {
1670 	return p1->owner == p2->owner &&
1671 		(p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
1672 		 p1->id == p2->id :
1673 		 mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
1674 		 mlx5_fs_dr_action_get_pkt_reformat_id(p2));
1675 }
1676 
1677 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1678 				struct mlx5_flow_destination *d2)
1679 {
1680 	if (d1->type == d2->type) {
1681 		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1682 		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1683 		     d1->vport.num == d2->vport.num &&
1684 		     d1->vport.flags == d2->vport.flags &&
1685 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1686 		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1687 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1688 		      mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
1689 					    d2->vport.pkt_reformat) : true)) ||
1690 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1691 		     d1->ft == d2->ft) ||
1692 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1693 		     d1->tir_num == d2->tir_num) ||
1694 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1695 		     d1->ft_num == d2->ft_num) ||
1696 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1697 		     d1->sampler_id == d2->sampler_id) ||
1698 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
1699 		     d1->range.field == d2->range.field &&
1700 		     d1->range.hit_ft == d2->range.hit_ft &&
1701 		     d1->range.miss_ft == d2->range.miss_ft &&
1702 		     d1->range.min == d2->range.min &&
1703 		     d1->range.max == d2->range.max))
1704 			return true;
1705 	}
1706 
1707 	return false;
1708 }
1709 
1710 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1711 					     struct mlx5_flow_destination *dest)
1712 {
1713 	struct mlx5_flow_rule *rule;
1714 
1715 	list_for_each_entry(rule, &fte->node.children, node.list) {
1716 		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1717 			return rule;
1718 	}
1719 	return NULL;
1720 }
1721 
1722 static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1723 					   const struct mlx5_fs_vlan *vlan1)
1724 {
1725 	return vlan0->ethtype != vlan1->ethtype ||
1726 	       vlan0->vid != vlan1->vid ||
1727 	       vlan0->prio != vlan1->prio;
1728 }
1729 
1730 static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1731 				      const struct mlx5_flow_act *act2)
1732 {
1733 	u32 action1 = act1->action;
1734 	u32 action2 = act2->action;
1735 	u32 xored_actions;
1736 
1737 	xored_actions = action1 ^ action2;
1738 
1739 	/* if one rule only wants to count, it's ok */
1740 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1741 	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1742 		return false;
1743 
1744 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
1745 			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1746 			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
1747 			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
1748 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1749 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1750 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1751 			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1752 		return true;
1753 
1754 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1755 	    act1->pkt_reformat != act2->pkt_reformat)
1756 		return true;
1757 
1758 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1759 	    act1->modify_hdr != act2->modify_hdr)
1760 		return true;
1761 
1762 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1763 	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1764 		return true;
1765 
1766 	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1767 	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1768 		return true;
1769 
1770 	return false;
1771 }
1772 
1773 static int check_conflicting_ftes(struct fs_fte *fte,
1774 				  const struct mlx5_flow_context *flow_context,
1775 				  const struct mlx5_flow_act *flow_act)
1776 {
1777 	if (check_conflicting_actions(flow_act, &fte->action)) {
1778 		mlx5_core_warn(get_dev(&fte->node),
1779 			       "Found two FTEs with conflicting actions\n");
1780 		return -EEXIST;
1781 	}
1782 
1783 	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1784 	    fte->flow_context.flow_tag != flow_context->flow_tag) {
1785 		mlx5_core_warn(get_dev(&fte->node),
1786 			       "FTE flow tag %u already exists with different flow tag %u\n",
1787 			       fte->flow_context.flow_tag,
1788 			       flow_context->flow_tag);
1789 		return -EEXIST;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1796 					    const struct mlx5_flow_spec *spec,
1797 					    struct mlx5_flow_act *flow_act,
1798 					    struct mlx5_flow_destination *dest,
1799 					    int dest_num,
1800 					    struct fs_fte *fte)
1801 {
1802 	struct mlx5_flow_handle *handle;
1803 	int old_action;
1804 	int i;
1805 	int ret;
1806 
1807 	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1808 	if (ret)
1809 		return ERR_PTR(ret);
1810 
1811 	old_action = fte->action.action;
1812 	fte->action.action |= flow_act->action;
1813 	handle = add_rule_fte(fte, fg, dest, dest_num,
1814 			      old_action != flow_act->action);
1815 	if (IS_ERR(handle)) {
1816 		fte->action.action = old_action;
1817 		return handle;
1818 	}
1819 	trace_mlx5_fs_set_fte(fte, false);
1820 
1821 	/* Link newly added rules into the tree. */
1822 	for (i = 0; i < handle->num_rules; i++) {
1823 		if (!handle->rule[i]->node.parent) {
1824 			tree_add_node(&handle->rule[i]->node, &fte->node);
1825 			trace_mlx5_fs_add_rule(handle->rule[i]);
1826 		}
1827 	}
1828 	return handle;
1829 }
1830 
1831 static bool counter_is_valid(u32 action)
1832 {
1833 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1834 			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1835 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1836 }
1837 
1838 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1839 			  struct mlx5_flow_act *flow_act,
1840 			  struct mlx5_flow_table *ft)
1841 {
1842 	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1843 	u32 action = flow_act->action;
1844 
1845 	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1846 		return counter_is_valid(action);
1847 
1848 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1849 		return true;
1850 
1851 	if (ignore_level) {
1852 		if (ft->type != FS_FT_FDB &&
1853 		    ft->type != FS_FT_NIC_RX &&
1854 		    ft->type != FS_FT_NIC_TX)
1855 			return false;
1856 
1857 		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1858 		    ft->type != dest->ft->type)
1859 			return false;
1860 	}
1861 
1862 	if (!dest || ((dest->type ==
1863 	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1864 	    (dest->ft->level <= ft->level && !ignore_level)))
1865 		return false;
1866 	return true;
1867 }
1868 
1869 struct match_list {
1870 	struct list_head	list;
1871 	struct mlx5_flow_group *g;
1872 };
1873 
1874 static void free_match_list(struct match_list *head, bool ft_locked)
1875 {
1876 	struct match_list *iter, *match_tmp;
1877 
1878 	list_for_each_entry_safe(iter, match_tmp, &head->list,
1879 				 list) {
1880 		tree_put_node(&iter->g->node, ft_locked);
1881 		list_del(&iter->list);
1882 		kfree(iter);
1883 	}
1884 }
1885 
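/* Collect, under RCU, the flow groups in @ft whose match criteria match
 * @spec (or only @fg when a specific group is requested), taking a tree
 * reference on each and linking them into @match_head. The caller must
 * release the references with free_match_list().
 */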
1886 static int build_match_list(struct match_list *match_head,
1887 			    struct mlx5_flow_table *ft,
1888 			    const struct mlx5_flow_spec *spec,
1889 			    struct mlx5_flow_group *fg,
1890 			    bool ft_locked)
1891 {
1892 	struct rhlist_head *tmp, *list;
1893 	struct mlx5_flow_group *g;
1894 
1895 	rcu_read_lock();
1896 	INIT_LIST_HEAD(&match_head->list);
1897 	/* Collect all fgs which have a matching match_criteria */
1898 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1899 	/* RCU is atomic, we can't execute FW commands here */
1900 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1901 		struct match_list *curr_match;
1902 
1903 		if (fg && fg != g)
1904 			continue;
1905 
1906 		if (unlikely(!tree_get_node(&g->node)))
1907 			continue;
1908 
1909 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1910 		if (!curr_match) {
1911 			rcu_read_unlock();
1912 			free_match_list(match_head, ft_locked);
1913 			return -ENOMEM;
1914 		}
1915 		curr_match->g = g;
1916 		list_add_tail(&curr_match->list, &match_head->list);
1917 	}
1918 	rcu_read_unlock();
1919 	return 0;
1920 }
1921 
1922 static u64 matched_fgs_get_version(struct list_head *match_head)
1923 {
1924 	struct match_list *iter;
1925 	u64 version = 0;
1926 
1927 	list_for_each_entry(iter, match_head, list)
1928 		version += (u64)atomic_read(&iter->g->node.version);
1929 	return version;
1930 }
1931 
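/* Look up the FTE with @match_value in @g. The group lock is taken for
 * read, or for write when @take_write is set, and is dropped again before
 * returning. On success the FTE is returned with an extra reference and
 * with its own node write-locked; inactive FTEs are treated as not found.
 */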
1932 static struct fs_fte *
1933 lookup_fte_locked(struct mlx5_flow_group *g,
1934 		  const u32 *match_value,
1935 		  bool take_write)
1936 {
1937 	struct fs_fte *fte_tmp;
1938 
1939 	if (take_write)
1940 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1941 	else
1942 		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1943 	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1944 					 rhash_fte);
1945 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1946 		fte_tmp = NULL;
1947 		goto out;
1948 	}
1949 
1950 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1951 
1952 	if (!fte_tmp->node.active) {
1953 		up_write_ref_node(&fte_tmp->node, false);
1954 
1955 		if (take_write)
1956 			up_write_ref_node(&g->node, false);
1957 		else
1958 			up_read_ref_node(&g->node);
1959 
1960 		tree_put_node(&fte_tmp->node, false);
1961 
1962 		return NULL;
1963 	}
1964 
1965 out:
1966 	if (take_write)
1967 		up_write_ref_node(&g->node, false);
1968 	else
1969 		up_read_ref_node(&g->node);
1970 	return fte_tmp;
1971 }
1972 
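/* Two-phase insertion into the already-matching flow groups in @match_head:
 * first (unless FLOW_ACT_NO_APPEND is set) look for an existing FTE with
 * the same match value and merge the new rule into it; otherwise insert a
 * freshly allocated FTE into the first matching group that still has room.
 * Returns -EAGAIN when the table or group versions changed underneath us,
 * so the caller can rebuild the match list and retry.
 */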
1973 static struct mlx5_flow_handle *
1974 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1975 		       struct list_head *match_head,
1976 		       const struct mlx5_flow_spec *spec,
1977 		       struct mlx5_flow_act *flow_act,
1978 		       struct mlx5_flow_destination *dest,
1979 		       int dest_num,
1980 		       int ft_version)
1981 {
1982 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
1983 	struct mlx5_flow_group *g;
1984 	struct mlx5_flow_handle *rule;
1985 	struct match_list *iter;
1986 	bool take_write = false;
1987 	bool try_again = false;
1988 	struct fs_fte *fte;
1989 	u64  version = 0;
1990 	int err;
1991 
1992 	fte = alloc_fte(ft, spec, flow_act);
1993 	if (IS_ERR(fte))
1994 		return  ERR_PTR(-ENOMEM);
1995 
1996 search_again_locked:
1997 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
1998 		goto skip_search;
1999 	version = matched_fgs_get_version(match_head);
2000 	/* Try to find an fte with an identical match value and attempt to
2001 	 * update its action.
2002 	 */
2003 	list_for_each_entry(iter, match_head, list) {
2004 		struct fs_fte *fte_tmp;
2005 
2006 		g = iter->g;
2007 		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
2008 		if (!fte_tmp)
2009 			continue;
2010 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
2011 		/* No error check needed here, because insert_fte() is not called */
2012 		up_write_ref_node(&fte_tmp->node, false);
2013 		tree_put_node(&fte_tmp->node, false);
2014 		kmem_cache_free(steering->ftes_cache, fte);
2015 		return rule;
2016 	}
2017 
2018 skip_search:
2019 	/* No group with matching fte found, or we skipped the search.
2020 	 * Try to add a new fte to any matching fg.
2021 	 */
2022 
2023 	/* Check the ft version, in case a new flow group
2024 	 * was added while the fgs weren't locked
2025 	 */
2026 	if (atomic_read(&ft->node.version) != ft_version) {
2027 		rule = ERR_PTR(-EAGAIN);
2028 		goto out;
2029 	}
2030 
2031 	/* Check the fgs version. If the version has changed, it could be that an
2032 	 * FTE with the same match value was added while the fgs weren't
2033 	 * locked.
2034 	 */
2035 	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
2036 	    version != matched_fgs_get_version(match_head)) {
2037 		take_write = true;
2038 		goto search_again_locked;
2039 	}
2040 
2041 	list_for_each_entry(iter, match_head, list) {
2042 		g = iter->g;
2043 
2044 		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
2045 
2046 		if (!g->node.active) {
2047 			try_again = true;
2048 			up_write_ref_node(&g->node, false);
2049 			continue;
2050 		}
2051 
2052 		err = insert_fte(g, fte);
2053 		if (err) {
2054 			up_write_ref_node(&g->node, false);
2055 			if (err == -ENOSPC)
2056 				continue;
2057 			kmem_cache_free(steering->ftes_cache, fte);
2058 			return ERR_PTR(err);
2059 		}
2060 
2061 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2062 		up_write_ref_node(&g->node, false);
2063 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2064 		up_write_ref_node(&fte->node, false);
2065 		if (IS_ERR(rule))
2066 			tree_put_node(&fte->node, false);
2067 		return rule;
2068 	}
2069 	err = try_again ? -EAGAIN : -ENOENT;
2070 	rule = ERR_PTR(err);
2071 out:
2072 	kmem_cache_free(steering->ftes_cache, fte);
2073 	return rule;
2074 }
2075 
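/* Core add-rule path: validate the spec and destinations, then try to add
 * the rule to an existing matching flow group under the table's read lock.
 * If that fails with -ENOENT/-EAGAIN, or the table version changed, retake
 * the table lock for write and retry; as a last resort create a new
 * autogrouped flow group and FTE for the rule.
 */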
2076 static struct mlx5_flow_handle *
2077 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2078 		     const struct mlx5_flow_spec *spec,
2079 		     struct mlx5_flow_act *flow_act,
2080 		     struct mlx5_flow_destination *dest,
2081 		     int dest_num)
2082 
2083 {
2084 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
2085 	struct mlx5_flow_handle *rule;
2086 	struct match_list match_head;
2087 	struct mlx5_flow_group *g;
2088 	bool take_write = false;
2089 	struct fs_fte *fte;
2090 	int version;
2091 	int err;
2092 	int i;
2093 
2094 	if (!check_valid_spec(spec))
2095 		return ERR_PTR(-EINVAL);
2096 
2097 	if (flow_act->fg && ft->autogroup.active)
2098 		return ERR_PTR(-EINVAL);
2099 
2100 	if (dest && dest_num <= 0)
2101 		return ERR_PTR(-EINVAL);
2102 
2103 	for (i = 0; i < dest_num; i++) {
2104 		if (!dest_is_valid(&dest[i], flow_act, ft))
2105 			return ERR_PTR(-EINVAL);
2106 	}
2107 	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2108 search_again_locked:
2109 	version = atomic_read(&ft->node.version);
2110 
2111 	/* Collect all fgs which have a matching match_criteria */
2112 	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
2113 	if (err) {
2114 		if (take_write)
2115 			up_write_ref_node(&ft->node, false);
2116 		else
2117 			up_read_ref_node(&ft->node);
2118 		return ERR_PTR(err);
2119 	}
2120 
2121 	if (!take_write)
2122 		up_read_ref_node(&ft->node);
2123 
2124 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
2125 				      dest_num, version);
2126 	free_match_list(&match_head, take_write);
2127 	if (!IS_ERR(rule) ||
2128 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
2129 		if (take_write)
2130 			up_write_ref_node(&ft->node, false);
2131 		return rule;
2132 	}
2133 
2134 	if (!take_write) {
2135 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2136 		take_write = true;
2137 	}
2138 
2139 	if (PTR_ERR(rule) == -EAGAIN ||
2140 	    version != atomic_read(&ft->node.version))
2141 		goto search_again_locked;
2142 
2143 	g = alloc_auto_flow_group(ft, spec);
2144 	if (IS_ERR(g)) {
2145 		rule = ERR_CAST(g);
2146 		up_write_ref_node(&ft->node, false);
2147 		return rule;
2148 	}
2149 
2150 	fte = alloc_fte(ft, spec, flow_act);
2151 	if (IS_ERR(fte)) {
2152 		up_write_ref_node(&ft->node, false);
2153 		err = PTR_ERR(fte);
2154 		goto err_alloc_fte;
2155 	}
2156 
2157 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
2158 	up_write_ref_node(&ft->node, false);
2159 
2160 	err = create_auto_flow_group(ft, g);
2161 	if (err)
2162 		goto err_release_fg;
2163 
2164 	err = insert_fte(g, fte);
2165 	if (err)
2166 		goto err_release_fg;
2167 
2168 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2169 	up_write_ref_node(&g->node, false);
2170 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2171 	up_write_ref_node(&fte->node, false);
2172 	if (IS_ERR(rule))
2173 		tree_put_node(&fte->node, false);
2174 	tree_put_node(&g->node, false);
2175 	return rule;
2176 
2177 err_release_fg:
2178 	up_write_ref_node(&g->node, false);
2179 	kmem_cache_free(steering->ftes_cache, fte);
2180 err_alloc_fte:
2181 	tree_put_node(&g->node, false);
2182 	return ERR_PTR(err);
2183 }
2184 
2185 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2186 {
2187 	return ((ft->type == FS_FT_NIC_RX) &&
2188 		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2189 }
2190 
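/* Typical usage (illustrative sketch only, not taken from this file):
 * a caller zeroes a spec, fills in the match fields it cares about, and
 * adds a rule that forwards matching packets to a TIR. "ft" and "tirn"
 * below stand for a caller-provided flow table and TIR number.
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec;
 *	struct mlx5_flow_handle *rule;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return ERR_PTR(-ENOMEM);
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *	if (IS_ERR(rule))
 *		return rule;
 *
 * The spec is only needed while the rule is added and may be freed right
 * after; the returned handle is released with mlx5_del_flow_rules(rule).
 */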
2191 struct mlx5_flow_handle *
2192 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2193 		    const struct mlx5_flow_spec *spec,
2194 		    struct mlx5_flow_act *flow_act,
2195 		    struct mlx5_flow_destination *dest,
2196 		    int num_dest)
2197 {
2198 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2199 	static const struct mlx5_flow_spec zero_spec = {};
2200 	struct mlx5_flow_destination *gen_dest = NULL;
2201 	struct mlx5_flow_table *next_ft = NULL;
2202 	struct mlx5_flow_handle *handle = NULL;
2203 	u32 sw_action = flow_act->action;
2204 	int i;
2205 
2206 	if (!spec)
2207 		spec = &zero_spec;
2208 
2209 	if (!is_fwd_next_action(sw_action))
2210 		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2211 
2212 	if (!fwd_next_prio_supported(ft))
2213 		return ERR_PTR(-EOPNOTSUPP);
2214 
2215 	mutex_lock(&root->chain_lock);
2216 	next_ft = find_next_fwd_ft(ft, flow_act);
2217 	if (!next_ft) {
2218 		handle = ERR_PTR(-EOPNOTSUPP);
2219 		goto unlock;
2220 	}
2221 
2222 	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2223 			   GFP_KERNEL);
2224 	if (!gen_dest) {
2225 		handle = ERR_PTR(-ENOMEM);
2226 		goto unlock;
2227 	}
2228 	for (i = 0; i < num_dest; i++)
2229 		gen_dest[i] = dest[i];
2230 	gen_dest[i].type =
2231 		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2232 	gen_dest[i].ft = next_ft;
2233 	dest = gen_dest;
2234 	num_dest++;
2235 	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2236 			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2237 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2238 	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2239 	if (IS_ERR(handle))
2240 		goto unlock;
2241 
2242 	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2243 		mutex_lock(&next_ft->lock);
2244 		list_add(&handle->rule[num_dest - 1]->next_ft,
2245 			 &next_ft->fwd_rules);
2246 		mutex_unlock(&next_ft->lock);
2247 		handle->rule[num_dest - 1]->sw_action = sw_action;
2248 		handle->rule[num_dest - 1]->ft = ft;
2249 	}
2250 unlock:
2251 	mutex_unlock(&root->chain_lock);
2252 	kfree(gen_dest);
2253 	return handle;
2254 }
2255 EXPORT_SYMBOL(mlx5_add_flow_rules);
2256 
2257 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2258 {
2259 	struct fs_fte *fte;
2260 	int i;
2261 
2262 	/* In order to consolidate the HW changes we lock the FTE for other
2263 	 * changes, and increase its refcount, so that the FTE's "del"
2264 	 * functions are not invoked implicitly; we handle them here instead.
2265 	 * The removal of the rules is done under the locked FTE.
2266 	 * After removing all the handle's rules, if there are remaining
2267 	 * rules, it means we just need to modify the FTE in FW, and
2268 	 * unlock/decrease the refcount we increased before.
2269 	 * Otherwise, it means the FTE should be deleted. First delete the
2270 	 * FTE in FW. Then, unlock the FTE, and proceed with tree_put_node of
2271 	 * the FTE, which will handle the last decrease of the refcount, as
2272 	 * well as required handling of its parent.
2273 	 */
2274 	fs_get_obj(fte, handle->rule[0]->node.parent);
2275 	down_write_ref_node(&fte->node, false);
2276 	for (i = handle->num_rules - 1; i >= 0; i--)
2277 		tree_remove_node(&handle->rule[i]->node, true);
2278 	if (list_empty(&fte->node.children)) {
2279 		fte->node.del_hw_func(&fte->node);
2280 		/* Avoid double call to del_hw_fte */
2281 		fte->node.del_hw_func = NULL;
2282 		up_write_ref_node(&fte->node, false);
2283 		tree_put_node(&fte->node, false);
2284 	} else if (fte->dests_size) {
2285 		if (fte->modify_mask)
2286 			modify_fte(fte);
2287 		up_write_ref_node(&fte->node, false);
2288 	} else {
2289 		up_write_ref_node(&fte->node, false);
2290 	}
2291 	kfree(handle);
2292 }
2293 EXPORT_SYMBOL(mlx5_del_flow_rules);
2294 
2295 /* Assuming prio->node.children (flow tables) is sorted by level */
2296 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2297 {
2298 	struct fs_node *prio_parent, *child;
2299 	struct fs_prio *prio;
2300 
2301 	fs_get_obj(prio, ft->node.parent);
2302 
2303 	if (!list_is_last(&ft->node.list, &prio->node.children))
2304 		return list_next_entry(ft, node.list);
2305 
2306 	prio_parent = find_prio_chains_parent(&prio->node, &child);
2307 
2308 	if (prio_parent && list_is_first(&child->list, &prio_parent->children))
2309 		return find_closest_ft(&prio->node, false, false);
2310 
2311 	return find_next_chained_ft(&prio->node);
2312 }
2313 
2314 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2315 {
2316 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2317 	struct mlx5_ft_underlay_qp *uqp;
2318 	struct mlx5_flow_table *new_root_ft = NULL;
2319 	int err = 0;
2320 	u32 qpn;
2321 
2322 	if (root->root_ft != ft)
2323 		return 0;
2324 
2325 	new_root_ft = find_next_ft(ft);
2326 	if (!new_root_ft) {
2327 		root->root_ft = NULL;
2328 		return 0;
2329 	}
2330 
2331 	if (list_empty(&root->underlay_qpns)) {
2332 		/* Don't set any QPN (zero) in case QPN list is empty */
2333 		qpn = 0;
2334 		err = root->cmds->update_root_ft(root, new_root_ft,
2335 						 qpn, false);
2336 	} else {
2337 		list_for_each_entry(uqp, &root->underlay_qpns, list) {
2338 			qpn = uqp->qpn;
2339 			err = root->cmds->update_root_ft(root,
2340 							 new_root_ft, qpn,
2341 							 false);
2342 			if (err)
2343 				break;
2344 		}
2345 	}
2346 
2347 	if (err)
2348 		mlx5_core_warn(root->dev,
2349 			       "Update root flow table of id(%u) qpn(%d) failed\n",
2350 			       ft->id, qpn);
2351 	else
2352 		root->root_ft = new_root_ft;
2353 
2354 	return 0;
2355 }
2356 
2357 /* Connect flow table from previous priority to
2358  * the next flow table.
2359  */
2360 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2361 {
2362 	struct mlx5_core_dev *dev = get_dev(&ft->node);
2363 	struct mlx5_flow_table *next_ft;
2364 	struct fs_prio *prio;
2365 	int err = 0;
2366 
2367 	err = update_root_ft_destroy(ft);
2368 	if (err)
2369 		return err;
2370 
2371 	fs_get_obj(prio, ft->node.parent);
2372 	if  (!(list_first_entry(&prio->node.children,
2373 				struct mlx5_flow_table,
2374 				node.list) == ft))
2375 		return 0;
2376 
2377 	next_ft = find_next_ft(ft);
2378 	err = connect_fwd_rules(dev, next_ft, ft);
2379 	if (err)
2380 		return err;
2381 
2382 	err = connect_prev_fts(dev, next_ft, prio);
2383 	if (err)
2384 		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2385 			       ft->id);
2386 	return err;
2387 }
2388 
2389 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2390 {
2391 	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2392 	int err = 0;
2393 
2394 	mutex_lock(&root->chain_lock);
2395 	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2396 		err = disconnect_flow_table(ft);
2397 	if (err) {
2398 		mutex_unlock(&root->chain_lock);
2399 		return err;
2400 	}
2401 	if (tree_remove_node(&ft->node, false))
2402 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2403 			       ft->id);
2404 	mutex_unlock(&root->chain_lock);
2405 
2406 	return err;
2407 }
2408 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2409 
2410 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2411 {
2412 	if (tree_remove_node(&fg->node, false))
2413 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2414 			       fg->id);
2415 }
2416 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2417 
2418 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2419 						int n)
2420 {
2421 	struct mlx5_flow_steering *steering = dev->priv.steering;
2422 
2423 	if (!steering || !steering->fdb_sub_ns)
2424 		return NULL;
2425 
2426 	return steering->fdb_sub_ns[n];
2427 }
2428 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2429 
2430 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2431 {
2432 	switch (type) {
2433 	case MLX5_FLOW_NAMESPACE_BYPASS:
2434 	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
2435 	case MLX5_FLOW_NAMESPACE_LAG:
2436 	case MLX5_FLOW_NAMESPACE_OFFLOADS:
2437 	case MLX5_FLOW_NAMESPACE_ETHTOOL:
2438 	case MLX5_FLOW_NAMESPACE_KERNEL:
2439 	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2440 	case MLX5_FLOW_NAMESPACE_ANCHOR:
2441 		return true;
2442 	default:
2443 		return false;
2444 	}
2445 }
2446 
2447 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2448 						    enum mlx5_flow_namespace_type type)
2449 {
2450 	struct mlx5_flow_steering *steering = dev->priv.steering;
2451 	struct mlx5_flow_root_namespace *root_ns;
2452 	int prio = 0;
2453 	struct fs_prio *fs_prio;
2454 	struct mlx5_flow_namespace *ns;
2455 
2456 	if (!steering)
2457 		return NULL;
2458 
2459 	switch (type) {
2460 	case MLX5_FLOW_NAMESPACE_FDB:
2461 		if (steering->fdb_root_ns)
2462 			return &steering->fdb_root_ns->ns;
2463 		return NULL;
2464 	case MLX5_FLOW_NAMESPACE_PORT_SEL:
2465 		if (steering->port_sel_root_ns)
2466 			return &steering->port_sel_root_ns->ns;
2467 		return NULL;
2468 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2469 		if (steering->sniffer_rx_root_ns)
2470 			return &steering->sniffer_rx_root_ns->ns;
2471 		return NULL;
2472 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2473 		if (steering->sniffer_tx_root_ns)
2474 			return &steering->sniffer_tx_root_ns->ns;
2475 		return NULL;
2476 	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2477 		root_ns = steering->fdb_root_ns;
2478 		prio =  FDB_BYPASS_PATH;
2479 		break;
2480 	case MLX5_FLOW_NAMESPACE_EGRESS:
2481 	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
2482 	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
2483 		root_ns = steering->egress_root_ns;
2484 		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2485 		break;
2486 	case MLX5_FLOW_NAMESPACE_RDMA_RX:
2487 		root_ns = steering->rdma_rx_root_ns;
2488 		prio = RDMA_RX_BYPASS_PRIO;
2489 		break;
2490 	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2491 		root_ns = steering->rdma_rx_root_ns;
2492 		prio = RDMA_RX_KERNEL_PRIO;
2493 		break;
2494 	case MLX5_FLOW_NAMESPACE_RDMA_TX:
2495 		root_ns = steering->rdma_tx_root_ns;
2496 		prio = RDMA_TX_BYPASS_PRIO;
2497 		break;
2498 	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2499 		root_ns = steering->rdma_rx_root_ns;
2500 		prio = RDMA_RX_COUNTERS_PRIO;
2501 		break;
2502 	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2503 		root_ns = steering->rdma_tx_root_ns;
2504 		prio = RDMA_TX_COUNTERS_PRIO;
2505 		break;
2506 	case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
2507 		root_ns = steering->rdma_rx_root_ns;
2508 		prio = RDMA_RX_IPSEC_PRIO;
2509 		break;
2510 	case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
2511 		root_ns = steering->rdma_tx_root_ns;
2512 		prio = RDMA_TX_IPSEC_PRIO;
2513 		break;
2514 	case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC:
2515 		root_ns = steering->rdma_rx_root_ns;
2516 		prio = RDMA_RX_MACSEC_PRIO;
2517 		break;
2518 	case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
2519 		root_ns = steering->rdma_tx_root_ns;
2520 		prio = RDMA_TX_MACSEC_PRIO;
2521 		break;
2522 	default: /* Must be NIC RX */
2523 		WARN_ON(!is_nic_rx_ns(type));
2524 		root_ns = steering->root_ns;
2525 		prio = type;
2526 		break;
2527 	}
2528 
2529 	if (!root_ns)
2530 		return NULL;
2531 
2532 	fs_prio = find_prio(&root_ns->ns, prio);
2533 	if (!fs_prio)
2534 		return NULL;
2535 
2536 	ns = list_first_entry(&fs_prio->node.children,
2537 			      typeof(*ns),
2538 			      node.list);
2539 
2540 	return ns;
2541 }
2542 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2543 
2544 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2545 							      enum mlx5_flow_namespace_type type,
2546 							      int vport)
2547 {
2548 	struct mlx5_flow_steering *steering = dev->priv.steering;
2549 
2550 	if (!steering)
2551 		return NULL;
2552 
2553 	switch (type) {
2554 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2555 		if (vport >= steering->esw_egress_acl_vports)
2556 			return NULL;
2557 		if (steering->esw_egress_root_ns &&
2558 		    steering->esw_egress_root_ns[vport])
2559 			return &steering->esw_egress_root_ns[vport]->ns;
2560 		else
2561 			return NULL;
2562 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2563 		if (vport >= steering->esw_ingress_acl_vports)
2564 			return NULL;
2565 		if (steering->esw_ingress_root_ns &&
2566 		    steering->esw_ingress_root_ns[vport])
2567 			return &steering->esw_ingress_root_ns[vport]->ns;
2568 		else
2569 			return NULL;
2570 	default:
2571 		return NULL;
2572 	}
2573 }
2574 
2575 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2576 				       unsigned int prio,
2577 				       int num_levels,
2578 				       enum fs_node_type type)
2579 {
2580 	struct fs_prio *fs_prio;
2581 
2582 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2583 	if (!fs_prio)
2584 		return ERR_PTR(-ENOMEM);
2585 
2586 	fs_prio->node.type = type;
2587 	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2588 	tree_add_node(&fs_prio->node, &ns->node);
2589 	fs_prio->num_levels = num_levels;
2590 	fs_prio->prio = prio;
2591 	list_add_tail(&fs_prio->node.list, &ns->node.children);
2592 
2593 	return fs_prio;
2594 }
2595 
2596 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2597 					      unsigned int prio,
2598 					      int num_levels)
2599 {
2600 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2601 }
2602 
2603 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2604 				      unsigned int prio, int num_levels)
2605 {
2606 	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2607 }
2608 
2609 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2610 						     *ns)
2611 {
2612 	ns->node.type = FS_TYPE_NAMESPACE;
2613 
2614 	return ns;
2615 }
2616 
2617 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2618 						       int def_miss_act)
2619 {
2620 	struct mlx5_flow_namespace	*ns;
2621 
2622 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2623 	if (!ns)
2624 		return ERR_PTR(-ENOMEM);
2625 
2626 	fs_init_namespace(ns);
2627 	ns->def_miss_action = def_miss_act;
2628 	tree_init_node(&ns->node, NULL, del_sw_ns);
2629 	tree_add_node(&ns->node, &prio->node);
2630 	list_add_tail(&ns->node.list, &prio->node.children);
2631 
2632 	return ns;
2633 }
2634 
2635 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2636 			     struct init_tree_node *prio_metadata)
2637 {
2638 	struct fs_prio *fs_prio;
2639 	int i;
2640 
2641 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2642 		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2643 		if (IS_ERR(fs_prio))
2644 			return PTR_ERR(fs_prio);
2645 	}
2646 	return 0;
2647 }
2648 
2649 #define FLOW_TABLE_BIT_SZ 1
2650 #define GET_FLOW_TABLE_CAP(dev, offset) \
2651 	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
2652 			offset / 32)) >>					\
2653 	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2654 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2655 {
2656 	int i;
2657 
2658 	for (i = 0; i < caps->arr_sz; i++) {
2659 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2660 			return false;
2661 	}
2662 	return true;
2663 }
2664 
2665 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2666 				    struct init_tree_node *init_node,
2667 				    struct fs_node *fs_parent_node,
2668 				    struct init_tree_node *init_parent_node,
2669 				    int prio)
2670 {
2671 	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2672 					      flow_table_properties_nic_receive.
2673 					      max_ft_level);
2674 	struct mlx5_flow_namespace *fs_ns;
2675 	struct fs_prio *fs_prio;
2676 	struct fs_node *base;
2677 	int i;
2678 	int err;
2679 
2680 	if (init_node->type == FS_TYPE_PRIO) {
2681 		if ((init_node->min_ft_level > max_ft_level) ||
2682 		    !has_required_caps(steering->dev, &init_node->caps))
2683 			return 0;
2684 
2685 		fs_get_obj(fs_ns, fs_parent_node);
2686 		if (init_node->num_leaf_prios)
2687 			return create_leaf_prios(fs_ns, prio, init_node);
2688 		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2689 		if (IS_ERR(fs_prio))
2690 			return PTR_ERR(fs_prio);
2691 		base = &fs_prio->node;
2692 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
2693 		fs_get_obj(fs_prio, fs_parent_node);
2694 		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2695 		if (IS_ERR(fs_ns))
2696 			return PTR_ERR(fs_ns);
2697 		base = &fs_ns->node;
2698 	} else {
2699 		return -EINVAL;
2700 	}
2701 	prio = 0;
2702 	for (i = 0; i < init_node->ar_size; i++) {
2703 		err = init_root_tree_recursive(steering, &init_node->children[i],
2704 					       base, init_node, prio);
2705 		if (err)
2706 			return err;
2707 		if (init_node->children[i].type == FS_TYPE_PRIO &&
2708 		    init_node->children[i].num_leaf_prios) {
2709 			prio += init_node->children[i].num_leaf_prios;
2710 		}
2711 	}
2712 
2713 	return 0;
2714 }
2715 
2716 static int init_root_tree(struct mlx5_flow_steering *steering,
2717 			  struct init_tree_node *init_node,
2718 			  struct fs_node *fs_parent_node)
2719 {
2720 	int err;
2721 	int i;
2722 
2723 	for (i = 0; i < init_node->ar_size; i++) {
2724 		err = init_root_tree_recursive(steering, &init_node->children[i],
2725 					       fs_parent_node,
2726 					       init_node, i);
2727 		if (err)
2728 			return err;
2729 	}
2730 	return 0;
2731 }
2732 
2733 static void del_sw_root_ns(struct fs_node *node)
2734 {
2735 	struct mlx5_flow_root_namespace *root_ns;
2736 	struct mlx5_flow_namespace *ns;
2737 
2738 	fs_get_obj(ns, node);
2739 	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2740 	mutex_destroy(&root_ns->chain_lock);
2741 	kfree(node);
2742 }
2743 
2744 static struct mlx5_flow_root_namespace
2745 *create_root_ns(struct mlx5_flow_steering *steering,
2746 		enum fs_flow_table_type table_type)
2747 {
2748 	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2749 	struct mlx5_flow_root_namespace *root_ns;
2750 	struct mlx5_flow_namespace *ns;
2751 
2752 	/* Create the root namespace */
2753 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2754 	if (!root_ns)
2755 		return NULL;
2756 
2757 	root_ns->dev = steering->dev;
2758 	root_ns->table_type = table_type;
2759 	root_ns->cmds = cmds;
2760 
2761 	INIT_LIST_HEAD(&root_ns->underlay_qpns);
2762 
2763 	ns = &root_ns->ns;
2764 	fs_init_namespace(ns);
2765 	mutex_init(&root_ns->chain_lock);
2766 	tree_init_node(&ns->node, NULL, del_sw_root_ns);
2767 	tree_add_node(&ns->node, NULL);
2768 
2769 	return root_ns;
2770 }
2771 
2772 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2773 
2774 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2775 {
2776 	struct fs_prio *prio;
2777 
2778 	fs_for_each_prio(prio, ns) {
2779 		 /* This updates prio start_level and num_levels */
2780 		set_prio_attrs_in_prio(prio, acc_level);
2781 		acc_level += prio->num_levels;
2782 	}
2783 	return acc_level;
2784 }
2785 
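/* Assign @prio->start_level and walk its namespaces, propagating the
 * accumulated level downwards. For FS_TYPE_PRIO_CHAINS prios the levels of
 * the chained namespaces are accumulated (each chain can jump to the next),
 * while for regular prios every namespace starts at the same level. A prio
 * without an explicit num_levels inherits the span used by its descendants.
 */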
2786 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2787 {
2788 	struct mlx5_flow_namespace *ns;
2789 	int acc_level_ns = acc_level;
2790 
2791 	prio->start_level = acc_level;
2792 	fs_for_each_ns(ns, prio) {
2793 		/* This updates start_level and num_levels of ns's priority descendants */
2794 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2795 
2796 		/* If this is a prio with chains, where we can jump from one chain
2797 		 * (namespace) to another, we accumulate the levels
2798 		 */
2799 		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2800 			acc_level = acc_level_ns;
2801 	}
2802 
2803 	if (!prio->num_levels)
2804 		prio->num_levels = acc_level_ns - prio->start_level;
2805 	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2806 }
2807 
2808 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2809 {
2810 	struct mlx5_flow_namespace *ns = &root_ns->ns;
2811 	struct fs_prio *prio;
2812 	int start_level = 0;
2813 
2814 	fs_for_each_prio(prio, ns) {
2815 		set_prio_attrs_in_prio(prio, start_level);
2816 		start_level += prio->num_levels;
2817 	}
2818 }
2819 
2820 #define ANCHOR_PRIO 0
2821 #define ANCHOR_SIZE 1
2822 #define ANCHOR_LEVEL 0
2823 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2824 {
2825 	struct mlx5_flow_namespace *ns = NULL;
2826 	struct mlx5_flow_table_attr ft_attr = {};
2827 	struct mlx5_flow_table *ft;
2828 
2829 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2830 	if (WARN_ON(!ns))
2831 		return -EINVAL;
2832 
2833 	ft_attr.max_fte = ANCHOR_SIZE;
2834 	ft_attr.level   = ANCHOR_LEVEL;
2835 	ft_attr.prio    = ANCHOR_PRIO;
2836 
2837 	ft = mlx5_create_flow_table(ns, &ft_attr);
2838 	if (IS_ERR(ft)) {
2839 		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2840 		return PTR_ERR(ft);
2841 	}
2842 	return 0;
2843 }
2844 
2845 static int init_root_ns(struct mlx5_flow_steering *steering)
2846 {
2847 	int err;
2848 
2849 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2850 	if (!steering->root_ns)
2851 		return -ENOMEM;
2852 
2853 	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2854 	if (err)
2855 		goto out_err;
2856 
2857 	set_prio_attrs(steering->root_ns);
2858 	err = create_anchor_flow_table(steering);
2859 	if (err)
2860 		goto out_err;
2861 
2862 	return 0;
2863 
2864 out_err:
2865 	cleanup_root_ns(steering->root_ns);
2866 	steering->root_ns = NULL;
2867 	return err;
2868 }
2869 
2870 static void clean_tree(struct fs_node *node)
2871 {
2872 	if (node) {
2873 		struct fs_node *iter;
2874 		struct fs_node *temp;
2875 
2876 		tree_get_node(node);
2877 		list_for_each_entry_safe(iter, temp, &node->children, list)
2878 			clean_tree(iter);
2879 		tree_put_node(node, false);
2880 		tree_remove_node(node, false);
2881 	}
2882 }
2883 
2884 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2885 {
2886 	if (!root_ns)
2887 		return;
2888 
2889 	clean_tree(&root_ns->ns.node);
2890 }
2891 
2892 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2893 {
2894 	struct fs_prio *prio;
2895 
2896 	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2897 	if (!steering->sniffer_tx_root_ns)
2898 		return -ENOMEM;
2899 
2900 	/* Create single prio */
2901 	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2902 	return PTR_ERR_OR_ZERO(prio);
2903 }
2904 
2905 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2906 {
2907 	struct fs_prio *prio;
2908 
2909 	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2910 	if (!steering->sniffer_rx_root_ns)
2911 		return -ENOMEM;
2912 
2913 	/* Create single prio */
2914 	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2915 	return PTR_ERR_OR_ZERO(prio);
2916 }
2917 
2918 #define PORT_SEL_NUM_LEVELS 3
2919 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2920 {
2921 	struct fs_prio *prio;
2922 
2923 	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2924 	if (!steering->port_sel_root_ns)
2925 		return -ENOMEM;
2926 
2927 	/* Create single prio */
2928 	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2929 			      PORT_SEL_NUM_LEVELS);
2930 	return PTR_ERR_OR_ZERO(prio);
2931 }
2932 
2933 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2934 {
2935 	int err;
2936 
2937 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2938 	if (!steering->rdma_rx_root_ns)
2939 		return -ENOMEM;
2940 
2941 	err = init_root_tree(steering, &rdma_rx_root_fs,
2942 			     &steering->rdma_rx_root_ns->ns.node);
2943 	if (err)
2944 		goto out_err;
2945 
2946 	set_prio_attrs(steering->rdma_rx_root_ns);
2947 
2948 	return 0;
2949 
2950 out_err:
2951 	cleanup_root_ns(steering->rdma_rx_root_ns);
2952 	steering->rdma_rx_root_ns = NULL;
2953 	return err;
2954 }
2955 
2956 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2957 {
2958 	int err;
2959 
2960 	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2961 	if (!steering->rdma_tx_root_ns)
2962 		return -ENOMEM;
2963 
2964 	err = init_root_tree(steering, &rdma_tx_root_fs,
2965 			     &steering->rdma_tx_root_ns->ns.node);
2966 	if (err)
2967 		goto out_err;
2968 
2969 	set_prio_attrs(steering->rdma_tx_root_ns);
2970 
2971 	return 0;
2972 
2973 out_err:
2974 	cleanup_root_ns(steering->rdma_tx_root_ns);
2975 	steering->rdma_tx_root_ns = NULL;
2976 	return err;
2977 }
2978 
2979 /* FT and tc chains are stored in the same array so we can re-use the
2980  * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2981  * When creating a new ns for each chain store it in the first available slot.
2982  * Assume tc chains are created and stored first and only then the FT chain.
2983  */
2984 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2985 					struct mlx5_flow_namespace *ns)
2986 {
2987 	int chain = 0;
2988 
2989 	while (steering->fdb_sub_ns[chain])
2990 		++chain;
2991 
2992 	steering->fdb_sub_ns[chain] = ns;
2993 }
2994 
2995 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2996 					struct fs_prio *maj_prio)
2997 {
2998 	struct mlx5_flow_namespace *ns;
2999 	struct fs_prio *min_prio;
3000 	int prio;
3001 
3002 	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
3003 	if (IS_ERR(ns))
3004 		return PTR_ERR(ns);
3005 
3006 	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
3007 		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
3008 		if (IS_ERR(min_prio))
3009 			return PTR_ERR(min_prio);
3010 	}
3011 
3012 	store_fdb_sub_ns_prio_chain(steering, ns);
3013 
3014 	return 0;
3015 }
3016 
3017 static int create_fdb_chains(struct mlx5_flow_steering *steering,
3018 			     int fs_prio,
3019 			     int chains)
3020 {
3021 	struct fs_prio *maj_prio;
3022 	int levels;
3023 	int chain;
3024 	int err;
3025 
3026 	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
3027 	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
3028 					  fs_prio,
3029 					  levels);
3030 	if (IS_ERR(maj_prio))
3031 		return PTR_ERR(maj_prio);
3032 
3033 	for (chain = 0; chain < chains; chain++) {
3034 		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
3035 		if (err)
3036 			return err;
3037 	}
3038 
3039 	return 0;
3040 }
3041 
3042 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
3043 {
3044 	int err;
3045 
3046 	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
3047 				       sizeof(*steering->fdb_sub_ns),
3048 				       GFP_KERNEL);
3049 	if (!steering->fdb_sub_ns)
3050 		return -ENOMEM;
3051 
3052 	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
3053 	if (err)
3054 		return err;
3055 
3056 	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
3057 	if (err)
3058 		return err;
3059 
3060 	return 0;
3061 }
3062 
3063 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
3064 {
3065 	struct mlx5_flow_namespace *ns;
3066 	struct fs_prio *prio;
3067 	int i;
3068 
3069 	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
3070 	if (IS_ERR(prio))
3071 		return PTR_ERR(prio);
3072 
3073 	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
3074 	if (IS_ERR(ns))
3075 		return PTR_ERR(ns);
3076 
3077 	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
3078 		prio = fs_create_prio(ns, i, 1);
3079 		if (IS_ERR(prio))
3080 			return PTR_ERR(prio);
3081 	}
3082 	return 0;
3083 }
3084 
3085 static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
3086 {
3087 	cleanup_root_ns(steering->fdb_root_ns);
3088 	steering->fdb_root_ns = NULL;
3089 	kfree(steering->fdb_sub_ns);
3090 	steering->fdb_sub_ns = NULL;
3091 }
3092 
3093 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
3094 {
3095 	struct fs_prio *maj_prio;
3096 	int err;
3097 
3098 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
3099 	if (!steering->fdb_root_ns)
3100 		return -ENOMEM;
3101 
3102 	err = create_fdb_bypass(steering);
3103 	if (err)
3104 		goto out_err;
3105 
3106 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3);
3107 	if (IS_ERR(maj_prio)) {
3108 		err = PTR_ERR(maj_prio);
3109 		goto out_err;
3110 	}
3111 
3112 	err = create_fdb_fast_path(steering);
3113 	if (err)
3114 		goto out_err;
3115 
3116 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
3117 	if (IS_ERR(maj_prio)) {
3118 		err = PTR_ERR(maj_prio);
3119 		goto out_err;
3120 	}
3121 
3122 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 4);
3123 	if (IS_ERR(maj_prio)) {
3124 		err = PTR_ERR(maj_prio);
3125 		goto out_err;
3126 	}
3127 
3128 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
3129 	if (IS_ERR(maj_prio)) {
3130 		err = PTR_ERR(maj_prio);
3131 		goto out_err;
3132 	}
3133 
3134 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_EGRESS, 3);
3135 	if (IS_ERR(maj_prio)) {
3136 		err = PTR_ERR(maj_prio);
3137 		goto out_err;
3138 	}
3139 
3140 	/* We put this priority last, knowing that nothing will get here
3141 	 * unless explicitly forwarded to. This is possible because the
3142 	 * slow path tables have catch all rules and nothing gets passed
3143 	 * those tables.
3144 	 */
3145 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
3146 	if (IS_ERR(maj_prio)) {
3147 		err = PTR_ERR(maj_prio);
3148 		goto out_err;
3149 	}
3150 
3151 	set_prio_attrs(steering->fdb_root_ns);
3152 	return 0;
3153 
3154 out_err:
3155 	cleanup_fdb_root_ns(steering);
3156 	return err;
3157 }
3158 
3159 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
3160 {
3161 	struct fs_prio *prio;
3162 
3163 	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
3164 	if (!steering->esw_egress_root_ns[vport])
3165 		return -ENOMEM;
3166 
3167 	/* create 1 prio */
3168 	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
3169 	return PTR_ERR_OR_ZERO(prio);
3170 }
3171 
3172 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
3173 {
3174 	struct fs_prio *prio;
3175 
3176 	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
3177 	if (!steering->esw_ingress_root_ns[vport])
3178 		return -ENOMEM;
3179 
3180 	/* create 1 prio */
3181 	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
3182 	return PTR_ERR_OR_ZERO(prio);
3183 }
3184 
3185 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3186 {
3187 	struct mlx5_flow_steering *steering = dev->priv.steering;
3188 	int err;
3189 	int i;
3190 
3191 	steering->esw_egress_root_ns =
3192 			kcalloc(total_vports,
3193 				sizeof(*steering->esw_egress_root_ns),
3194 				GFP_KERNEL);
3195 	if (!steering->esw_egress_root_ns)
3196 		return -ENOMEM;
3197 
3198 	for (i = 0; i < total_vports; i++) {
3199 		err = init_egress_acl_root_ns(steering, i);
3200 		if (err)
3201 			goto cleanup_root_ns;
3202 	}
3203 	steering->esw_egress_acl_vports = total_vports;
3204 	return 0;
3205 
3206 cleanup_root_ns:
3207 	for (i--; i >= 0; i--)
3208 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3209 	kfree(steering->esw_egress_root_ns);
3210 	steering->esw_egress_root_ns = NULL;
3211 	return err;
3212 }
3213 
3214 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3215 {
3216 	struct mlx5_flow_steering *steering = dev->priv.steering;
3217 	int i;
3218 
3219 	if (!steering->esw_egress_root_ns)
3220 		return;
3221 
3222 	for (i = 0; i < steering->esw_egress_acl_vports; i++)
3223 		cleanup_root_ns(steering->esw_egress_root_ns[i]);
3224 
3225 	kfree(steering->esw_egress_root_ns);
3226 	steering->esw_egress_root_ns = NULL;
3227 }
3228 
3229 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3230 {
3231 	struct mlx5_flow_steering *steering = dev->priv.steering;
3232 	int err;
3233 	int i;
3234 
3235 	steering->esw_ingress_root_ns =
3236 			kcalloc(total_vports,
3237 				sizeof(*steering->esw_ingress_root_ns),
3238 				GFP_KERNEL);
3239 	if (!steering->esw_ingress_root_ns)
3240 		return -ENOMEM;
3241 
3242 	for (i = 0; i < total_vports; i++) {
3243 		err = init_ingress_acl_root_ns(steering, i);
3244 		if (err)
3245 			goto cleanup_root_ns;
3246 	}
3247 	steering->esw_ingress_acl_vports = total_vports;
3248 	return 0;
3249 
3250 cleanup_root_ns:
3251 	for (i--; i >= 0; i--)
3252 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3253 	kfree(steering->esw_ingress_root_ns);
3254 	steering->esw_ingress_root_ns = NULL;
3255 	return err;
3256 }
3257 
3258 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3259 {
3260 	struct mlx5_flow_steering *steering = dev->priv.steering;
3261 	int i;
3262 
3263 	if (!steering->esw_ingress_root_ns)
3264 		return;
3265 
3266 	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3267 		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3268 
3269 	kfree(steering->esw_ingress_root_ns);
3270 	steering->esw_ingress_root_ns = NULL;
3271 }
3272 
3273 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3274 {
3275 	struct mlx5_flow_root_namespace *root;
3276 	struct mlx5_flow_namespace *ns;
3277 
3278 	ns = mlx5_get_flow_namespace(dev, type);
3279 	if (!ns)
3280 		return 0;
3281 
3282 	root = find_root(&ns->node);
3283 	if (!root)
3284 		return 0;
3285 
3286 	return root->cmds->get_capabilities(root, root->table_type);
3287 }
3288 
3289 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3290 {
3291 	int err;
3292 
3293 	steering->egress_root_ns = create_root_ns(steering,
3294 						  FS_FT_NIC_TX);
3295 	if (!steering->egress_root_ns)
3296 		return -ENOMEM;
3297 
3298 	err = init_root_tree(steering, &egress_root_fs,
3299 			     &steering->egress_root_ns->ns.node);
3300 	if (err)
3301 		goto cleanup;
3302 	set_prio_attrs(steering->egress_root_ns);
3303 	return 0;
3304 cleanup:
3305 	cleanup_root_ns(steering->egress_root_ns);
3306 	steering->egress_root_ns = NULL;
3307 	return err;
3308 }
3309 
3310 static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
3311 				 union devlink_param_value val,
3312 				 struct netlink_ext_ack *extack)
3313 {
3314 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3315 	char *value = val.vstr;
3316 	int err = 0;
3317 
3318 	if (!strcmp(value, "dmfs")) {
3319 		return 0;
3320 	} else if (!strcmp(value, "smfs")) {
3321 		u8 eswitch_mode;
3322 		bool smfs_cap;
3323 
3324 		eswitch_mode = mlx5_eswitch_mode(dev);
3325 		smfs_cap = mlx5_fs_dr_is_supported(dev);
3326 
3327 		if (!smfs_cap) {
3328 			err = -EOPNOTSUPP;
3329 			NL_SET_ERR_MSG_MOD(extack,
3330 					   "Software managed steering is not supported by current device");
3331 		} else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
3334 			NL_SET_ERR_MSG_MOD(extack,
3335 					   "Software managed steering is not supported when eswitch offloads enabled.");
3336 			err = -EOPNOTSUPP;
3337 		}
3338 	} else {
3339 		NL_SET_ERR_MSG_MOD(extack,
3340 				   "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
3341 		err = -EINVAL;
3342 	}
3343 
3344 	return err;
3345 }
3346 
3347 static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
3348 			    struct devlink_param_gset_ctx *ctx)
3349 {
3350 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3351 	enum mlx5_flow_steering_mode mode;
3352 
3353 	if (!strcmp(ctx->val.vstr, "smfs"))
3354 		mode = MLX5_FLOW_STEERING_MODE_SMFS;
3355 	else
3356 		mode = MLX5_FLOW_STEERING_MODE_DMFS;
3357 	dev->priv.steering->mode = mode;
3358 
3359 	return 0;
3360 }
3361 
3362 static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
3363 			    struct devlink_param_gset_ctx *ctx)
3364 {
3365 	struct mlx5_core_dev *dev = devlink_priv(devlink);
3366 
3367 	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
3368 		strcpy(ctx->val.vstr, "smfs");
3369 	else
3370 		strcpy(ctx->val.vstr, "dmfs");
3371 	return 0;
3372 }
3373 
3374 static const struct devlink_param mlx5_fs_params[] = {
3375 	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
3376 			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
3377 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3378 			     mlx5_fs_mode_get, mlx5_fs_mode_set,
3379 			     mlx5_fs_mode_validate),
3380 };
3381 
3382 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
3383 {
3384 	struct mlx5_flow_steering *steering = dev->priv.steering;
3385 
3386 	cleanup_root_ns(steering->root_ns);
3387 	cleanup_fdb_root_ns(steering);
3388 	cleanup_root_ns(steering->port_sel_root_ns);
3389 	cleanup_root_ns(steering->sniffer_rx_root_ns);
3390 	cleanup_root_ns(steering->sniffer_tx_root_ns);
3391 	cleanup_root_ns(steering->rdma_rx_root_ns);
3392 	cleanup_root_ns(steering->rdma_tx_root_ns);
3393 	cleanup_root_ns(steering->egress_root_ns);
3394 
3395 	devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
3396 			       ARRAY_SIZE(mlx5_fs_params));
3397 }
3398 
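/* Register the flow steering devlink parameters and create the root
 * namespaces (NIC RX/TX, FDB, sniffers, port selection, RDMA RX/TX)
 * according to device capabilities. Any failure unwinds through
 * mlx5_fs_core_cleanup().
 */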
3399 int mlx5_fs_core_init(struct mlx5_core_dev *dev)
3400 {
3401 	struct mlx5_flow_steering *steering = dev->priv.steering;
3402 	int err;
3403 
3404 	err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
3405 				   ARRAY_SIZE(mlx5_fs_params));
3406 	if (err)
3407 		return err;
3408 
3409 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3410 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3411 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3412 	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3413 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3414 		err = init_root_ns(steering);
3415 		if (err)
3416 			goto err;
3417 	}
3418 
3419 	if (MLX5_ESWITCH_MANAGER(dev)) {
3420 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3421 			err = init_fdb_root_ns(steering);
3422 			if (err)
3423 				goto err;
3424 		}
3425 	}
3426 
3427 	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3428 		err = init_sniffer_rx_root_ns(steering);
3429 		if (err)
3430 			goto err;
3431 	}
3432 
3433 	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3434 		err = init_sniffer_tx_root_ns(steering);
3435 		if (err)
3436 			goto err;
3437 	}
3438 
3439 	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3440 		err = init_port_sel_root_ns(steering);
3441 		if (err)
3442 			goto err;
3443 	}
3444 
3445 	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3446 	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3447 		err = init_rdma_rx_root_ns(steering);
3448 		if (err)
3449 			goto err;
3450 	}
3451 
3452 	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3453 		err = init_rdma_tx_root_ns(steering);
3454 		if (err)
3455 			goto err;
3456 	}
3457 
3458 	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3459 		err = init_egress_root_ns(steering);
3460 		if (err)
3461 			goto err;
3462 	}
3463 
3464 	return 0;
3465 
3466 err:
3467 	mlx5_fs_core_cleanup(dev);
3468 	return err;
3469 }
3470 
3471 void mlx5_fs_core_free(struct mlx5_core_dev *dev)
3472 {
3473 	struct mlx5_flow_steering *steering = dev->priv.steering;
3474 
3475 	kmem_cache_destroy(steering->ftes_cache);
3476 	kmem_cache_destroy(steering->fgs_cache);
3477 	kfree(steering);
3478 	mlx5_ft_pool_destroy(dev);
3479 	mlx5_cleanup_fc_stats(dev);
3480 }
3481 
3482 int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
3483 {
3484 	struct mlx5_flow_steering *steering;
3485 	char name[80];
3486 	int err = 0;
3487 
3488 	err = mlx5_init_fc_stats(dev);
3489 	if (err)
3490 		return err;
3491 
3492 	err = mlx5_ft_pool_init(dev);
3493 	if (err)
3494 		goto err;
3495 
3496 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3497 	if (!steering) {
3498 		err = -ENOMEM;
3499 		goto err;
3500 	}
3501 
3502 	steering->dev = dev;
3503 	dev->priv.steering = steering;
3504 
3505 	if (mlx5_fs_dr_is_supported(dev))
3506 		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
3507 	else
3508 		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
3509 
3510 	snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
3511 	steering->fgs_cache = kmem_cache_create(name,
3512 						sizeof(struct mlx5_flow_group), 0,
3513 						0, NULL);
3514 	snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
3515 	steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
3516 						 0, NULL);
3517 	if (!steering->ftes_cache || !steering->fgs_cache) {
3518 		err = -ENOMEM;
3519 		goto err;
3520 	}
3521 
3522 	return 0;
3523 
3524 err:
3525 	mlx5_fs_core_free(dev);
3526 	return err;
3527 }
3528 
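/* Add an underlay QPN to the NIC RX root flow table and track it on the
 * root namespace's underlay_qpns list.
 */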
3529 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3530 {
3531 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3532 	struct mlx5_ft_underlay_qp *new_uqp;
3533 	int err = 0;
3534 
3535 	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3536 	if (!new_uqp)
3537 		return -ENOMEM;
3538 
3539 	mutex_lock(&root->chain_lock);
3540 
3541 	if (!root->root_ft) {
3542 		err = -EINVAL;
3543 		goto update_ft_fail;
3544 	}
3545 
3546 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3547 					 false);
3548 	if (err) {
3549 		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3550 			       underlay_qpn, err);
3551 		goto update_ft_fail;
3552 	}
3553 
3554 	new_uqp->qpn = underlay_qpn;
3555 	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3556 
3557 	mutex_unlock(&root->chain_lock);
3558 
3559 	return 0;
3560 
3561 update_ft_fail:
3562 	mutex_unlock(&root->chain_lock);
3563 	kfree(new_uqp);
3564 	return err;
3565 }
3566 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3567 
3568 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3569 {
3570 	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3571 	struct mlx5_ft_underlay_qp *uqp;
3572 	bool found = false;
3573 	int err = 0;
3574 
3575 	mutex_lock(&root->chain_lock);
3576 	list_for_each_entry(uqp, &root->underlay_qpns, list) {
3577 		if (uqp->qpn == underlay_qpn) {
3578 			found = true;
3579 			break;
3580 		}
3581 	}
3582 
3583 	if (!found) {
3584 		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3585 			       underlay_qpn);
3586 		err = -EINVAL;
3587 		goto out;
3588 	}
3589 
3590 	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3591 					 true);
3592 	if (err)
3593 		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3594 			       underlay_qpn, err);
3595 
3596 	list_del(&uqp->list);
3597 	mutex_unlock(&root->chain_lock);
3598 	kfree(uqp);
3599 
3600 	return 0;
3601 
3602 out:
3603 	mutex_unlock(&root->chain_lock);
3604 	return err;
3605 }
3606 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3607 
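/* Resolve the root namespace of a namespace type; vport ACL types are
 * looked up through vport 0.
 */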
3608 static struct mlx5_flow_root_namespace
3609 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3610 {
3611 	struct mlx5_flow_namespace *ns;
3612 
3613 	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3614 	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3615 		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3616 	else
3617 		ns = mlx5_get_flow_namespace(dev, ns_type);
3618 	if (!ns)
3619 		return NULL;
3620 
3621 	return find_root(&ns->node);
3622 }
3623 
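/* Allocate a modify-header action in the given namespace type through the
 * root namespace's steering commands.
 */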
3624 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3625 						 u8 ns_type, u8 num_actions,
3626 						 void *modify_actions)
3627 {
3628 	struct mlx5_flow_root_namespace *root;
3629 	struct mlx5_modify_hdr *modify_hdr;
3630 	int err;
3631 
3632 	root = get_root_namespace(dev, ns_type);
3633 	if (!root)
3634 		return ERR_PTR(-EOPNOTSUPP);
3635 
3636 	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3637 	if (!modify_hdr)
3638 		return ERR_PTR(-ENOMEM);
3639 
3640 	modify_hdr->ns_type = ns_type;
3641 	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3642 					      modify_actions, modify_hdr);
3643 	if (err) {
3644 		kfree(modify_hdr);
3645 		return ERR_PTR(err);
3646 	}
3647 
3648 	return modify_hdr;
3649 }
3650 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3651 
3652 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3653 				struct mlx5_modify_hdr *modify_hdr)
3654 {
3655 	struct mlx5_flow_root_namespace *root;
3656 
3657 	root = get_root_namespace(dev, modify_hdr->ns_type);
3658 	if (WARN_ON(!root))
3659 		return;
3660 	root->cmds->modify_header_dealloc(root, modify_hdr);
3661 	kfree(modify_hdr);
3662 }
3663 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3664 
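/* Allocate a packet reformat context in the given namespace type through
 * the root namespace's steering commands.
 */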
3665 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3666 						     struct mlx5_pkt_reformat_params *params,
3667 						     enum mlx5_flow_namespace_type ns_type)
3668 {
3669 	struct mlx5_pkt_reformat *pkt_reformat;
3670 	struct mlx5_flow_root_namespace *root;
3671 	int err;
3672 
3673 	root = get_root_namespace(dev, ns_type);
3674 	if (!root)
3675 		return ERR_PTR(-EOPNOTSUPP);
3676 
3677 	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3678 	if (!pkt_reformat)
3679 		return ERR_PTR(-ENOMEM);
3680 
3681 	pkt_reformat->ns_type = ns_type;
3682 	pkt_reformat->reformat_type = params->type;
3683 	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3684 						pkt_reformat);
3685 	if (err) {
3686 		kfree(pkt_reformat);
3687 		return ERR_PTR(err);
3688 	}
3689 
3690 	return pkt_reformat;
3691 }
3692 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3693 
3694 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3695 				  struct mlx5_pkt_reformat *pkt_reformat)
3696 {
3697 	struct mlx5_flow_root_namespace *root;
3698 
3699 	root = get_root_namespace(dev, pkt_reformat->ns_type);
3700 	if (WARN_ON(!root))
3701 		return;
3702 	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3703 	kfree(pkt_reformat);
3704 }
3705 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3706 
3707 int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
3708 {
3709 	return definer->id;
3710 }
3711 
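/* Create a match definer for the given format id and match mask through
 * the root namespace's steering commands.
 */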
3712 struct mlx5_flow_definer *
3713 mlx5_create_match_definer(struct mlx5_core_dev *dev,
3714 			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
3715 			  u32 *match_mask)
3716 {
3717 	struct mlx5_flow_root_namespace *root;
3718 	struct mlx5_flow_definer *definer;
3719 	int id;
3720 
3721 	root = get_root_namespace(dev, ns_type);
3722 	if (!root)
3723 		return ERR_PTR(-EOPNOTSUPP);
3724 
3725 	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3726 	if (!definer)
3727 		return ERR_PTR(-ENOMEM);
3728 
3729 	definer->ns_type = ns_type;
3730 	id = root->cmds->create_match_definer(root, format_id, match_mask);
3731 	if (id < 0) {
3732 		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3733 		kfree(definer);
3734 		return ERR_PTR(id);
3735 	}
3736 	definer->id = id;
3737 	return definer;
3738 }
3739 
3740 void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3741 				struct mlx5_flow_definer *definer)
3742 {
3743 	struct mlx5_flow_root_namespace *root;
3744 
3745 	root = get_root_namespace(dev, definer->ns_type);
3746 	if (WARN_ON(!root))
3747 		return;
3748 
3749 	root->cmds->destroy_match_definer(root, definer->id);
3750 	kfree(definer);
3751 }
3752 
3753 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3754 				 struct mlx5_flow_root_namespace *peer_ns,
3755 				 u16 peer_vhca_id)
3756 {
3757 	if (peer_ns && ns->mode != peer_ns->mode) {
3758 		mlx5_core_err(ns->dev,
3759 			      "Can't peer namespace of different steering mode\n");
3760 		return -EINVAL;
3761 	}
3762 
3763 	return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id);
3764 }
3765 
3766 /* This function should be called only at init stage of the namespace.
3767  * It is not safe to call this function while steering operations
3768  * are executed in the namespace.
3769  */
3770 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3771 				 enum mlx5_flow_steering_mode mode)
3772 {
3773 	struct mlx5_flow_root_namespace *root;
3774 	const struct mlx5_flow_cmds *cmds;
3775 	int err;
3776 
3777 	root = find_root(&ns->node);
3778 	/* Can't set cmds to non root namespace */
3779 	if (&root->ns != ns)
3780 		return -EINVAL;
3781 
3782 	if (root->table_type != FS_FT_FDB)
3783 		return -EOPNOTSUPP;
3784 
3785 	if (root->mode == mode)
3786 		return 0;
3787 
3788 	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3789 		cmds = mlx5_fs_cmd_get_dr_cmds();
3790 	else
3791 		cmds = mlx5_fs_cmd_get_fw_cmds();
3792 	if (!cmds)
3793 		return -EOPNOTSUPP;
3794 
3795 	err = cmds->create_ns(root);
3796 	if (err) {
3797 		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3798 			      err);
3799 		return err;
3800 	}
3801 
3802 	root->cmds->destroy_ns(root);
3803 	root->cmds = cmds;
3804 	root->mode = mode;
3805 
3806 	return 0;
3807 }
3808