/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"

struct mlx5e_flow_steering {
	struct work_struct		set_rx_mode_work;
	bool				state_destroy;
	bool				vlan_strip_disable;
	struct mlx5_core_dev		*mdev;
	struct net_device		*netdev;
	struct mlx5_flow_namespace      *ns;
	struct mlx5_flow_namespace      *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
	struct mlx5e_ethtool_steering   *ethtool;
#endif
	struct mlx5e_tc_table           *tc;
	struct mlx5e_promisc_table      promisc;
	struct mlx5e_vlan_table         *vlan;
	struct mlx5e_l2_table           l2;
	struct mlx5_ttc_table           *ttc;
	struct mlx5_ttc_table           *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
	struct mlx5e_arfs_tables       *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_fs_tcp      *accel_tcp;
#endif
	struct mlx5e_fs_udp            *udp;
	struct mlx5e_fs_any            *any;
	struct mlx5e_ptp_fs            *ptp_fs;
	struct dentry                  *dfs_root;
};

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
};

enum {
	MLX5E_UC        = 0,
	MLX5E_MC_IPV4   = 1,
	MLX5E_MC_IPV6   = 2,
	MLX5E_MC_OTHER  = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_l2_rule ai;
	bool   mpfs;
};

static inline int mlx5e_hash_l2(const u8 *addr)
{
	return addr[5];
}
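
/*
 * The L2 hash is simply the least-significant byte of the MAC address,
 * so the netdev_uc/netdev_mc tables need one bucket per possible byte
 * value (i.e. MLX5E_L2_ADDR_HASH_SIZE, used by the iteration macro
 * further below, is assumed here to be 256). For example:
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	mlx5e_hash_l2(mac);	// 0x55, i.e. bucket 85
 */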

struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs)
{
	return fs->dfs_root;
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
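
/*
 * mlx5e_add_l2_to_hash() allocates with GFP_ATOMIC because its caller,
 * mlx5e_sync_netdev_addr() below, walks the netdev address lists under
 * netif_addr_lock_bh() and therefore must not sleep. A minimal usage
 * sketch (the local "hash" array is hypothetical, for illustration):
 *
 *	struct hlist_head hash[MLX5E_L2_ADDR_HASH_SIZE] = {};
 *	static const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	mlx5e_add_l2_to_hash(hash, mac);  // new node, action = MLX5E_ACTION_ADD
 *	mlx5e_add_l2_to_hash(hash, mac);  // found node, action = MLX5E_ACTION_NONE
 *
 * Allocation failure is silently ignored; the address will simply be
 * picked up again on the next rx-mode sync.
 */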

struct mlx5e_vlan_table {
	struct mlx5e_flow_table		ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle	*active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*untagged_rule;
	struct mlx5_flow_handle	*any_cvlan_rule;
	struct mlx5_flow_handle	*any_svlan_rule;
	struct mlx5_flow_handle	*trap_rule;
	bool			cvlan_filter_disabled;
};

unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
{
	return vlan->active_svlans;
}

struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
{
	return vlan->ft.t;
}

static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
	if (err)
		fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
		       err);

	kvfree(vlans);
	return err;
}
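
/*
 * The device caps bound how many C-VIDs can be mirrored into the vport
 * context. The capacity is a power of two, e.g. (the value 9 below is
 * purely illustrative, not a real capability):
 *
 *	log_max_vlan_list = 9  ->  max_list_size = 1 << 9 = 512
 *
 * VLANs beyond that limit still get steering rules in the VLAN flow
 * table; they are only dropped from the vport context list, with the
 * fs_warn() above noting it.
 */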

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs->l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (the packet is untagged)
		 */
		rule_p = &fs->vlan->untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &fs->vlan->any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &fs->vlan->any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &fs->vlan->active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &fs->vlan->active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		fs_err(fs, "add rule failed\n");
	}

	return err;
}
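
/*
 * How the match criteria/value pairs above translate to packet
 * classification (a summary of the switch cases, not additional rules):
 *
 *	criteria cvlan_tag=1, value cvlan_tag=0         -> untagged only
 *	criteria cvlan_tag=1, value cvlan_tag=1         -> any C-tag
 *	criteria svlan_tag=1, value svlan_tag=1         -> any S-tag
 *	criteria {c,s}vlan_tag + first_vid, value vid=N -> exact VID N
 *
 * I.e. a bit set in match_criteria makes that field participate in the
 * match, and match_value supplies the value it must then equal.
 */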

static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(fs);

	err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
				   enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (fs->vlan->untagged_rule) {
			mlx5_del_flow_rules(fs->vlan->untagged_rule);
			fs->vlan->untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (fs->vlan->any_cvlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
			fs->vlan->any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (fs->vlan->any_svlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
			fs->vlan->any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (fs->vlan->active_svlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
			fs->vlan->active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (fs->vlan->active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
			fs->vlan->active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(fs);
		break;
	}
}

static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	int err;

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
{
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	spec->flow_context.flow_tag = trap_id;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);
	return rule;
}

int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs->vlan->trap_rule = NULL;
		fs_err(fs, "add VLAN trap rule failed, err %d\n", err);
		return err;
	}
	fs->vlan->trap_rule = rule;
	return 0;
}

void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
	if (fs->vlan->trap_rule) {
		mlx5_del_flow_rules(fs->vlan->trap_rule);
		fs->vlan->trap_rule = NULL;
	}
}

int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = fs->l2.ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs->l2.trap_rule = NULL;
		fs_err(fs, "add MAC trap rule failed, err %d\n", err);
		return err;
	}
	fs->l2.trap_rule = rule;
	return 0;
}

void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
	if (fs->l2.trap_rule) {
		mlx5_del_flow_rules(fs->l2.trap_rule);
		fs->l2.trap_rule = NULL;
	}
}

void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (!fs->vlan->cvlan_filter_disabled)
		return;

	fs->vlan->cvlan_filter_disabled = false;
	if (promisc)
		return;
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
		return;

	fs->vlan->cvlan_filter_disabled = true;
	if (promisc)
		return;
	mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_cvlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, fs->vlan->active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
				  struct net_device *netdev, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_svlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, fs->vlan->active_svlans);
		return err;
	}

	/* Some netdev features depend on the S-tagged VLAN state; re-evaluate them. */
	netdev_update_features(netdev);
	return err;
}

int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
			     struct net_device *netdev,
			     __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "VLAN table doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(fs, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(fs, netdev, vid);

	return -EOPNOTSUPP;
}

int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
			      struct net_device *netdev,
			      __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "VLAN table doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, fs->vlan->active_cvlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, fs->vlan->active_svlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(netdev);
	}

	return 0;
}
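
/*
 * These two helpers back the netdev VLAN offload callbacks. A sketch of
 * the expected wiring (the mlx5e_priv layout is an assumption here; see
 * the real ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid callbacks in
 * en_main.c):
 *
 *	static int mlx5e_vlan_rx_add_vid(struct net_device *dev,
 *					 __be16 proto, u16 vid)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *		return mlx5e_fs_vlan_rx_add_vid(priv->fs, dev, proto, vid);
 *	}
 *
 * Only ETH_P_8021Q and ETH_P_8021AD are steered; any other protocol is
 * rejected with -EOPNOTSUPP on add and silently ignored on kill.
 */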

static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_add_any_vid_rules(fs);
}

static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(fs->state_destroy);

	mlx5e_remove_vlan_trap(fs);

	/* Must be called after the DESTROY bit is set and
	 * set_rx_mode has been called and flushed
	 */
	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_del_any_vid_rules(fs);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
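
/*
 * mlx5e_for_each_hash_node() visits every node in all
 * MLX5E_L2_ADDR_HASH_SIZE buckets, using the _safe list variant so the
 * body may free the node it is visiting, e.g.:
 *
 *	struct mlx5e_l2_hash_node *hn;
 *	struct hlist_node *tmp;
 *	int i;
 *
 *	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
 *		mlx5e_execute_l2_action(fs, hn);	// may delete hn
 */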

static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(fs, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
			action == MLX5E_ACTION_ADD ? "add" : "del",
			mac_addr, l2_err);
}

static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
				   struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
				  struct net_device *ndev,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (fs->l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
						 struct net_device *netdev,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
	if (err)
		fs_err(fs, "Failed to modify vport %s list err(%d)\n",
		       is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
				       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(fs->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		mlx5e_execute_l2_action(fs, hn);

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		mlx5e_execute_l2_action(fs, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
				     struct net_device *netdev)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (fs->state_destroy)
		mlx5e_sync_netdev_addr(fs, netdev);

	mlx5e_apply_netdev_addr(fs);
}
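
/*
 * mlx5e_handle_netdev_addr() is a mark-and-sweep sync: every cached
 * node is first marked MLX5E_ACTION_DEL, then (unless teardown is in
 * progress, i.e. fs->state_destroy is false) the current netdev address
 * lists are replayed on top, flipping survivors back to
 * MLX5E_ACTION_NONE and inserting newcomers as MLX5E_ACTION_ADD.
 * mlx5e_apply_netdev_addr() finally executes whatever action is left on
 * each node, so on teardown every cached address is flushed from the
 * hardware.
 */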

#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE

static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table *ft = fs->promisc.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	rule_p = &fs->promisc.rule;
	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		fs_err(fs, "add promiscuous rule failed\n");
	}
	kvfree(spec);
	return err;
}

static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_flow_table *ft = &fs->promisc.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		fs_err(fs, "failed to create promisc table err=%d\n", err);
		return err;
	}

	err = mlx5e_add_promisc_rule(fs);
	if (err)
		goto err_destroy_promisc_table;

	return 0;

err_destroy_promisc_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
	if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
		return;
	mlx5_del_flow_rules(fs->promisc.rule);
	fs->promisc.rule = NULL;
}

static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
	if (!fs->promisc.ft.t)
		return;
	mlx5e_del_promisc_rule(fs);
	mlx5_destroy_flow_table(fs->promisc.ft.t);
	fs->promisc.ft.t = NULL;
}

void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
			       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	bool rx_mode_enable  = fs->state_destroy;
	bool promisc_enabled   = rx_mode_enable && (netdev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(fs);
		if (err)
			enable_promisc = false;
		if (!fs->vlan_strip_disable && !err)
			fs_warn_once(fs,
				     "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(fs, netdev);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(fs);

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(fs, netdev);
}
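
/*
 * mlx5e_fs_set_rx_mode_work() applies rx-mode changes as edge triggers:
 * each enable/disable pair above compares the state cached in fs->l2
 * against the state desired from netdev->flags, so rules are only added
 * or removed on an actual transition. For example, clearing IFF_PROMISC
 * on an interface where promisc was previously on yields
 * disable_promisc == true, tearing down just the promisc table while
 * the allmulti and broadcast rules stay untouched.
 */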

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
	ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
				       struct mlx5e_rx_res *rx_res,
				       struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss_inner(rx_res,
								tt);
	}
}

void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
			  struct mlx5e_rx_res *rx_res,
			  struct ttc_params *ttc_params, bool tunnel)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	}

	ttc_params->inner_ttc = tunnel;
	if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(fs->inner_ttc);
	}
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = fs->l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(15)
#define MLX5E_L2_GROUP2_SIZE	   BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE   BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP_TRAP_SIZE)
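
/*
 * Resulting L2 table layout (the flow-index ranges follow directly from
 * the group sizes above):
 *
 *	group 0: full DMAC match,  BIT(15) = 32768 entries (indices 0..32767)
 *	group 1: allmulti,         1 entry (index 32768)
 *	group 2: traps (no match), 1 entry (index 32769, must be last)
 */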
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for l2 traps */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
	mlx5e_destroy_flow_table(&fs->l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_l2_table *l2_table = &fs->l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

#define MLX5E_NUM_VLAN_GROUPS	5
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE +\
				 MLX5E_VLAN_GROUP_TRAP_SIZE)

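/*
 * Resulting VLAN table layout, mirroring the five groups created in
 * __mlx5e_create_vlan_table_groups() below:
 *
 *	group 0: cvlan_tag + first_vid, 4096 entries (one per C-VID)
 *	group 1: svlan_tag + first_vid, 4096 entries (one per S-VID)
 *	group 2: cvlan_tag only,        2 entries (any C-tag, untagged)
 *	group 3: svlan_tag only,        1 entry (any S-tag)
 *	group 4: traps (no criteria),   1 entry (must be last)
 */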
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft;
	int err;

	ft = &fs->vlan->ft;
	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t))
		return PTR_ERR(ft->t);

	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_fs_add_vlan_rules(fs);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
	mlx5e_del_vlan_rules(fs);
	mlx5e_destroy_flow_table(&fs->vlan->ft);
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
	if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
		return;
	mlx5_destroy_ttc_table(fs->inner_ttc);
}

void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
	mlx5_destroy_ttc_table(fs->ttc);
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
					struct mlx5e_rx_res *rx_res)
{
	struct ttc_params ttc_params = {};

	if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
		return 0;

	mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
	fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
						    &ttc_params);
	if (IS_ERR(fs->inner_ttc))
		return PTR_ERR(fs->inner_ttc);
	return 0;
}

int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
			   struct mlx5e_rx_res *rx_res)
{
	struct ttc_params ttc_params = {};

	mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
	fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
	if (IS_ERR(fs->ttc))
		return PTR_ERR(fs->ttc);
	return 0;
}

int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
			       struct mlx5e_rx_res *rx_res,
			       const struct mlx5e_profile *profile,
			       struct net_device *netdev)
{
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
								 MLX5_FLOW_NAMESPACE_KERNEL);
	int err;

	if (!ns)
		return -EOPNOTSUPP;

	mlx5e_fs_set_ns(fs, ns, false);
	err = mlx5e_arfs_create_tables(fs, rx_res,
				       !!(netdev->hw_features & NETIF_F_NTUPLE));
	if (err) {
		fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
		netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(fs, rx_res);
	if (err) {
		fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(fs, rx_res);
	if (err) {
		fs_err(fs, "Failed to create ttc table, err=%d\n", err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(fs);
	if (err) {
		fs_err(fs, "Failed to create l2 table, err=%d\n", err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_fs_create_vlan_table(fs);
	if (err) {
		fs_err(fs, "Failed to create vlan table, err=%d\n", err);
		goto err_destroy_l2_table;
	}

	err = mlx5e_ptp_alloc_rx_fs(fs, profile);
	if (err)
		goto err_destroy_vlan_table;

	mlx5e_ethtool_init_steering(fs);

	return 0;

err_destroy_vlan_table:
	mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
	mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
				 const struct mlx5e_profile *profile)
{
	mlx5e_ptp_free_rx_fs(fs, profile);
	mlx5e_destroy_vlan_table(fs);
	mlx5e_destroy_l2_table(fs);
	mlx5e_destroy_ttc_table(fs);
	mlx5e_destroy_inner_ttc_table(fs);
	mlx5e_arfs_destroy_tables(fs, ntuple);
	mlx5e_ethtool_cleanup_steering(fs);
}
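
/*
 * A sketch of the intended pairing between the constructors and
 * destructors in this file, as a profile would typically drive them
 * (error handling elided; the surrounding objects are whatever the
 * caller already has at hand):
 *
 *	fs = mlx5e_fs_init(profile, mdev, true, dfs_root);
 *	err = mlx5e_create_flow_steering(fs, rx_res, profile, netdev);
 *	...
 *	mlx5e_destroy_flow_steering(fs, ntuple, profile);
 *	mlx5e_fs_cleanup(fs);
 *
 * mlx5e_create_flow_steering() unwinds everything it created on
 * failure, so a failed create must not be paired with a destroy.
 */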

static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
{
	fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
	if (!fs->vlan)
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
	kvfree(fs->vlan);
}

struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
{
	return fs->vlan;
}

static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
	fs->tc = mlx5e_tc_table_alloc();
	if (IS_ERR(fs->tc))
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_tc_table_free(fs->tc);
}

struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
{
	return fs->tc;
}

#ifdef CONFIG_MLX5_EN_RXNFC
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{
	return mlx5e_ethtool_alloc(&fs->ethtool);
}

static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_ethtool_free(fs->ethtool);
}

struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
{
	return fs->ethtool;
}
#else
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{ return 0; }
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif

static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	fs->dfs_root = debugfs_create_dir("fs", dfs_root);
}

struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
					  struct mlx5_core_dev *mdev,
					  bool state_destroy,
					  struct dentry *dfs_root)
{
	struct mlx5e_flow_steering *fs;
	int err;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		goto err;

	fs->mdev = mdev;
	fs->state_destroy = state_destroy;
	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
		err = mlx5e_fs_vlan_alloc(fs);
		if (err)
			goto err_free_fs;
	}

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);
		if (err)
			goto err_free_vlan;
	}

	err = mlx5e_fs_ethtool_alloc(fs);
	if (err)
		goto err_free_tc;

	mlx5e_fs_debugfs_init(fs, dfs_root);

	return fs;
err_free_tc:
	mlx5e_fs_tc_free(fs);
err_free_vlan:
	mlx5e_fs_vlan_free(fs);
err_free_fs:
	kvfree(fs);
err:
	return NULL;
}

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	if (!fs)
		return;
	debugfs_remove_recursive(fs->dfs_root);
	mlx5e_fs_ethtool_free(fs);
	mlx5e_fs_tc_free(fs);
	mlx5e_fs_vlan_free(fs);
	kvfree(fs);
}

struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
{
	return &fs->l2;
}

struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
{
	return egress ? fs->egress_ns : fs->ns;
}

void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
{
	if (!egress)
		fs->ns = ns;
	else
		fs->egress_ns = ns;
}

struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
{
	return inner ? fs->inner_ttc : fs->ttc;
}

void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
{
	if (!inner)
		fs->ttc = ttc;
	else
		fs->inner_ttc = ttc;
}

#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
{
	return fs->arfs;
}

void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
{
	fs->arfs = arfs;
}
#endif

struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
{
	return fs->ptp_fs;
}

void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
{
	fs->ptp_fs = ptp_fs;
}

struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
{
	return fs->any;
}

void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
{
	fs->any = any;
}

#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
{
	return fs->accel_tcp;
}

void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
{
	fs->accel_tcp = accel_tcp;
}
#endif

void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
{
	fs->state_destroy = state_destroy;
}

void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
				     bool vlan_strip_disable)
{
	fs->vlan_strip_disable = vlan_strip_disable;
}

struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
{
	return fs->udp;
}

void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
{
	fs->udp = udp;
}

struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
{
	return fs->mdev;
}
1601