169697b6eSOr Gerlitz /*
269697b6eSOr Gerlitz * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
369697b6eSOr Gerlitz *
469697b6eSOr Gerlitz * This software is available to you under a choice of one of two
569697b6eSOr Gerlitz * licenses. You may choose to be licensed under the terms of the GNU
669697b6eSOr Gerlitz * General Public License (GPL) Version 2, available from the file
769697b6eSOr Gerlitz * COPYING in the main directory of this source tree, or the
869697b6eSOr Gerlitz * OpenIB.org BSD license below:
969697b6eSOr Gerlitz *
1069697b6eSOr Gerlitz * Redistribution and use in source and binary forms, with or
1169697b6eSOr Gerlitz * without modification, are permitted provided that the following
1269697b6eSOr Gerlitz * conditions are met:
1369697b6eSOr Gerlitz *
1469697b6eSOr Gerlitz * - Redistributions of source code must retain the above
1569697b6eSOr Gerlitz * copyright notice, this list of conditions and the following
1669697b6eSOr Gerlitz * disclaimer.
1769697b6eSOr Gerlitz *
1869697b6eSOr Gerlitz * - Redistributions in binary form must reproduce the above
1969697b6eSOr Gerlitz * copyright notice, this list of conditions and the following
2069697b6eSOr Gerlitz * disclaimer in the documentation and/or other materials
2169697b6eSOr Gerlitz * provided with the distribution.
2269697b6eSOr Gerlitz *
2369697b6eSOr Gerlitz * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2469697b6eSOr Gerlitz * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2569697b6eSOr Gerlitz * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2669697b6eSOr Gerlitz * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2769697b6eSOr Gerlitz * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2869697b6eSOr Gerlitz * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2969697b6eSOr Gerlitz * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3069697b6eSOr Gerlitz * SOFTWARE.
3169697b6eSOr Gerlitz */
3269697b6eSOr Gerlitz
3369697b6eSOr Gerlitz #include <linux/etherdevice.h>
34133dcfc5SVu Pham #include <linux/idr.h>
3569697b6eSOr Gerlitz #include <linux/mlx5/driver.h>
3669697b6eSOr Gerlitz #include <linux/mlx5/mlx5_ifc.h>
3769697b6eSOr Gerlitz #include <linux/mlx5/vport.h>
3869697b6eSOr Gerlitz #include <linux/mlx5/fs.h>
3969697b6eSOr Gerlitz #include "mlx5_core.h"
4069697b6eSOr Gerlitz #include "eswitch.h"
4134ca6535SVlad Buslov #include "esw/indir_table.h"
42ea651a86SVu Pham #include "esw/acl/ofld.h"
4380f09dfcSMaor Gottlieb #include "rdma.h"
44e52c2802SPaul Blakey #include "en.h"
45e52c2802SPaul Blakey #include "fs_core.h"
46ac004b83SRoi Dayan #include "lib/devcom.h"
47a3888f33SBodong Wang #include "lib/eq.h"
48ae430332SAriel Levkovich #include "lib/fs_chains.h"
49c620b772SAriel Levkovich #include "en_tc.h"
50c9355682SChris Mi #include "en/mapping.h"
51c85a6b8fSAya Levin #include "devlink.h"
5294db3317SEli Cohen #include "lag/lag.h"
536fda078dSOz Shlomo #include "en/tc/post_meter.h"
5469697b6eSOr Gerlitz
5547dd7e60SParav Pandit #define mlx5_esw_for_each_rep(esw, i, rep) \
5647dd7e60SParav Pandit xa_for_each(&((esw)->offloads.vport_reps), i, rep)
5747dd7e60SParav Pandit
58cd7e4186SBodong Wang /* There are two match-all miss flows, one for unicast dst mac and
59cd7e4186SBodong Wang * one for multicast.
60cd7e4186SBodong Wang */
61cd7e4186SBodong Wang #define MLX5_ESW_MISS_FLOWS (2)
62c9b99abcSBodong Wang #define UPLINK_REP_INDEX 0
63c9b99abcSBodong Wang
64c796bb7cSChris Mi #define MLX5_ESW_VPORT_TBL_SIZE 128
65c796bb7cSChris Mi #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
66c796bb7cSChris Mi
678ea7bcf6SJianbo Liu #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
688ea7bcf6SJianbo Liu
/* Namespace parameters for the per-vport tables used for mirroring;
 * shared template applied to every vport table created from it.
 */
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};
74c796bb7cSChris Mi
/* Look up the representor registered for @vport_num; NULL if none. */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;

	rep = xa_load(&esw->offloads.vport_reps, vport_num);
	return rep;
}
80879c8f84SBodong Wang
816f7bbad1SJianbo Liu static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch * esw,struct mlx5_flow_spec * spec,struct mlx5_esw_flow_attr * attr)826f7bbad1SJianbo Liu mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
836f7bbad1SJianbo Liu struct mlx5_flow_spec *spec,
846f7bbad1SJianbo Liu struct mlx5_esw_flow_attr *attr)
856f7bbad1SJianbo Liu {
86166f431eSAriel Levkovich if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
87166f431eSAriel Levkovich return;
88166f431eSAriel Levkovich
89166f431eSAriel Levkovich if (attr->int_port) {
90166f431eSAriel Levkovich spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
91166f431eSAriel Levkovich
92166f431eSAriel Levkovich return;
93166f431eSAriel Levkovich }
94166f431eSAriel Levkovich
95166f431eSAriel Levkovich spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
96036e19b9SHamdan Igbaria MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
97036e19b9SHamdan Igbaria MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
986f7bbad1SJianbo Liu }
99b7826076SParav Pandit
100f94d6389SChris Mi /* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
101f94d6389SChris Mi * are not needed as well in the following process. So clear them all for simplicity.
102f94d6389SChris Mi */
103f94d6389SChris Mi void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch * esw,struct mlx5_flow_spec * spec)104f94d6389SChris Mi mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
105f94d6389SChris Mi {
106f94d6389SChris Mi if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
107f94d6389SChris Mi void *misc2;
108f94d6389SChris Mi
109f94d6389SChris Mi misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
110f94d6389SChris Mi MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
111f94d6389SChris Mi
112f94d6389SChris Mi misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
113f94d6389SChris Mi MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
114f94d6389SChris Mi
115f94d6389SChris Mi if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
116f94d6389SChris Mi spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
117f94d6389SChris Mi }
118f94d6389SChris Mi }
119f94d6389SChris Mi
/* Add a source-port match to @spec so the rule only applies to traffic
 * arriving from @vport on @src_esw. Two mechanisms: reg_c_0 metadata
 * matching when enabled, otherwise the misc source_port fields.
 */
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* For indirect-table decap flows the effective source is the
		 * decap vport, not the caller-supplied one.
		 */
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		/* Chain-0 rules from an internal port match on the internal
		 * port's metadata instead of the vport's.
		 */
		if (!attr->chain && esw_attr && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		/* On merged eswitch vport numbers are not unique across
		 * devices; also match the owning VHCA id.
		 */
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
172c01cfd0fSJianbo Liu
173a508728aSVlad Buslov static int
esw_setup_decap_indir(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr)174a508728aSVlad Buslov esw_setup_decap_indir(struct mlx5_eswitch *esw,
175521933cdSMaor Dickman struct mlx5_flow_attr *attr)
176a508728aSVlad Buslov {
177a508728aSVlad Buslov struct mlx5_flow_table *ft;
178a508728aSVlad Buslov
179e5d4e1daSRoi Dayan if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
180a508728aSVlad Buslov return -EOPNOTSUPP;
181a508728aSVlad Buslov
182521933cdSMaor Dickman ft = mlx5_esw_indir_table_get(esw, attr,
183a508728aSVlad Buslov mlx5_esw_indir_table_decap_vport(attr), true);
184a508728aSVlad Buslov return PTR_ERR_OR_ZERO(ft);
185a508728aSVlad Buslov }
186a508728aSVlad Buslov
1879e51c0a6SVlad Buslov static void
esw_cleanup_decap_indir(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr)188a508728aSVlad Buslov esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
189a508728aSVlad Buslov struct mlx5_flow_attr *attr)
190a508728aSVlad Buslov {
191a508728aSVlad Buslov if (mlx5_esw_indir_table_decap_vport(attr))
192521933cdSMaor Dickman mlx5_esw_indir_table_put(esw,
193a508728aSVlad Buslov mlx5_esw_indir_table_decap_vport(attr),
194a508728aSVlad Buslov true);
195a508728aSVlad Buslov }
196a508728aSVlad Buslov
197a508728aSVlad Buslov static int
esw_setup_mtu_dest(struct mlx5_flow_destination * dest,struct mlx5e_meter_attr * meter,int i)1986fda078dSOz Shlomo esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
1996fda078dSOz Shlomo struct mlx5e_meter_attr *meter,
2006fda078dSOz Shlomo int i)
2016fda078dSOz Shlomo {
2026fda078dSOz Shlomo dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
2036fda078dSOz Shlomo dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
2046fda078dSOz Shlomo dest[i].range.min = 0;
2056fda078dSOz Shlomo dest[i].range.max = meter->params.mtu;
2066fda078dSOz Shlomo dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
2076fda078dSOz Shlomo dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
2086fda078dSOz Shlomo
2096fda078dSOz Shlomo return 0;
2106fda078dSOz Shlomo }
2116fda078dSOz Shlomo
2126fda078dSOz Shlomo static int
esw_setup_sampler_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,u32 sampler_id,int i)213f94d6389SChris Mi esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
214f94d6389SChris Mi struct mlx5_flow_act *flow_act,
215eeed226eSRoi Dayan u32 sampler_id,
216f94d6389SChris Mi int i)
217f94d6389SChris Mi {
218f94d6389SChris Mi flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
219f94d6389SChris Mi dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
220eeed226eSRoi Dayan dest[i].sampler_id = sampler_id;
221f94d6389SChris Mi
222f94d6389SChris Mi return 0;
223f94d6389SChris Mi }
224f94d6389SChris Mi
225f94d6389SChris Mi static int
esw_setup_ft_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr,int i)2269e51c0a6SVlad Buslov esw_setup_ft_dest(struct mlx5_flow_destination *dest,
2279e51c0a6SVlad Buslov struct mlx5_flow_act *flow_act,
228a508728aSVlad Buslov struct mlx5_eswitch *esw,
2299e51c0a6SVlad Buslov struct mlx5_flow_attr *attr,
2309e51c0a6SVlad Buslov int i)
2319e51c0a6SVlad Buslov {
2329e51c0a6SVlad Buslov flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2339e51c0a6SVlad Buslov dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2349e51c0a6SVlad Buslov dest[i].ft = attr->dest_ft;
235a508728aSVlad Buslov
236a508728aSVlad Buslov if (mlx5_esw_indir_table_decap_vport(attr))
237521933cdSMaor Dickman return esw_setup_decap_indir(esw, attr);
238a508728aSVlad Buslov return 0;
2399e51c0a6SVlad Buslov }
2409e51c0a6SVlad Buslov
2419e51c0a6SVlad Buslov static void
esw_setup_accept_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_fs_chains * chains,int i)242c0063a43SVlad Buslov esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
243c0063a43SVlad Buslov struct mlx5_fs_chains *chains, int i)
2449e51c0a6SVlad Buslov {
2452a2c84faSRoi Dayan if (mlx5_chains_ignore_flow_level_supported(chains))
2469e51c0a6SVlad Buslov flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2479e51c0a6SVlad Buslov dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2489e51c0a6SVlad Buslov dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
2499e51c0a6SVlad Buslov }
2509e51c0a6SVlad Buslov
251c0063a43SVlad Buslov static void
esw_setup_slow_path_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,int i)252c0063a43SVlad Buslov esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
253c0063a43SVlad Buslov struct mlx5_eswitch *esw, int i)
254c0063a43SVlad Buslov {
255c0063a43SVlad Buslov if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
256c0063a43SVlad Buslov flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
257c0063a43SVlad Buslov dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
258dcf19b9cSMaor Dickman dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
259c0063a43SVlad Buslov }
260c0063a43SVlad Buslov
2619e51c0a6SVlad Buslov static int
esw_setup_chain_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_fs_chains * chains,u32 chain,u32 prio,u32 level,int i)2629e51c0a6SVlad Buslov esw_setup_chain_dest(struct mlx5_flow_destination *dest,
2639e51c0a6SVlad Buslov struct mlx5_flow_act *flow_act,
2649e51c0a6SVlad Buslov struct mlx5_fs_chains *chains,
2659e51c0a6SVlad Buslov u32 chain, u32 prio, u32 level,
2669e51c0a6SVlad Buslov int i)
2679e51c0a6SVlad Buslov {
2689e51c0a6SVlad Buslov struct mlx5_flow_table *ft;
2699e51c0a6SVlad Buslov
2709e51c0a6SVlad Buslov flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2719e51c0a6SVlad Buslov ft = mlx5_chains_get_table(chains, chain, prio, level);
2729e51c0a6SVlad Buslov if (IS_ERR(ft))
2739e51c0a6SVlad Buslov return PTR_ERR(ft);
2749e51c0a6SVlad Buslov
2759e51c0a6SVlad Buslov dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2769e51c0a6SVlad Buslov dest[i].ft = ft;
2779e51c0a6SVlad Buslov return 0;
2789e51c0a6SVlad Buslov }
2799e51c0a6SVlad Buslov
esw_put_dest_tables_loop(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr,int from,int to)28010742efcSVlad Buslov static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
28110742efcSVlad Buslov int from, int to)
28210742efcSVlad Buslov {
28310742efcSVlad Buslov struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
28410742efcSVlad Buslov struct mlx5_fs_chains *chains = esw_chains(esw);
28510742efcSVlad Buslov int i;
28610742efcSVlad Buslov
28710742efcSVlad Buslov for (i = from; i < to; i++)
28810742efcSVlad Buslov if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
28910742efcSVlad Buslov mlx5_chains_put_table(chains, 0, 1, 0);
29096c8c465SVlad Buslov else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
291a508728aSVlad Buslov esw_attr->dests[i].mdev))
29296c8c465SVlad Buslov mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
29310742efcSVlad Buslov }
29410742efcSVlad Buslov
29510742efcSVlad Buslov static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr)29610742efcSVlad Buslov esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
29710742efcSVlad Buslov {
29810742efcSVlad Buslov int i;
29910742efcSVlad Buslov
30010742efcSVlad Buslov for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
30110742efcSVlad Buslov if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
30210742efcSVlad Buslov return true;
30310742efcSVlad Buslov return false;
30410742efcSVlad Buslov }
30510742efcSVlad Buslov
/* Set up a single chain-jump destination (chain-0 table) for a flow
 * that needs source-port rewrite, carrying the forward destination's
 * encap reformat if present. Advances *i on success.
 * Returns 0 or a negative errno.
 */
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	/* Source-port rewrite must have been negotiated for this flow. */
	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	/* There is exactly one forward dest (checked above); apply its
	 * packet reformat, if any, on the flow action.
	 */
	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}
33810742efcSVlad Buslov
esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr)33910742efcSVlad Buslov static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
34010742efcSVlad Buslov struct mlx5_flow_attr *attr)
34110742efcSVlad Buslov {
34210742efcSVlad Buslov struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
34310742efcSVlad Buslov
34410742efcSVlad Buslov esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
34510742efcSVlad Buslov }
34610742efcSVlad Buslov
347a508728aSVlad Buslov static bool
esw_is_indir_table(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr)348a508728aSVlad Buslov esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
349a508728aSVlad Buslov {
350a508728aSVlad Buslov struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
351e219440dSMaor Dickman bool result = false;
352a508728aSVlad Buslov int i;
353a508728aSVlad Buslov
354e219440dSMaor Dickman /* Indirect table is supported only for flows with in_port uplink
355e219440dSMaor Dickman * and the destination is vport on the same eswitch as the uplink,
356e219440dSMaor Dickman * return false in case at least one of destinations doesn't meet
357e219440dSMaor Dickman * this criteria.
358e219440dSMaor Dickman */
359e219440dSMaor Dickman for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
36096c8c465SVlad Buslov if (esw_attr->dests[i].vport_valid &&
36196c8c465SVlad Buslov mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
362e219440dSMaor Dickman esw_attr->dests[i].mdev)) {
363e219440dSMaor Dickman result = true;
364e219440dSMaor Dickman } else {
365e219440dSMaor Dickman result = false;
366e219440dSMaor Dickman break;
367e219440dSMaor Dickman }
368e219440dSMaor Dickman }
369e219440dSMaor Dickman return result;
370a508728aSVlad Buslov }
371a508728aSVlad Buslov
/* Point dest[*i..] at the per-destination indirect tables of all
 * forward (non-mirror) destinations, advancing *i per dest, and set up
 * the decap indirect table when the flow has a decap vport.
 * On failure all tables acquired by this call are released.
 * Returns 0 or a negative errno.
 */
static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	/* Indirect tables require the source-port rewrite flag. */
	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	/* Release only the tables taken in this call: [split_count, j). */
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}
409a508728aSVlad Buslov
/* Undo esw_setup_indir_table(): release the per-destination indirect
 * tables, then the decap indirect table.
 */
static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}
417a508728aSVlad Buslov
/* Drop the chain-table reference taken by esw_setup_chain_dest(). */
static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}
4239e51c0a6SVlad Buslov
esw_same_vhca_id(struct mlx5_core_dev * mdev1,struct mlx5_core_dev * mdev2)424d1569537SJianbo Liu static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
425d1569537SJianbo Liu {
426d1569537SJianbo Liu return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
427d1569537SJianbo Liu }
428d1569537SJianbo Liu
esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr,int attr_idx)429d1569537SJianbo Liu static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
430d1569537SJianbo Liu struct mlx5_esw_flow_attr *esw_attr,
431d1569537SJianbo Liu int attr_idx)
432d1569537SJianbo Liu {
433d1569537SJianbo Liu if (esw->offloads.ft_ipsec_tx_pol &&
43496c8c465SVlad Buslov esw_attr->dests[attr_idx].vport_valid &&
43596c8c465SVlad Buslov esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
436d1569537SJianbo Liu /* To be aligned with software, encryption is needed only for tunnel device */
437d1569537SJianbo Liu (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
43896c8c465SVlad Buslov esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
439d1569537SJianbo Liu esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
440d1569537SJianbo Liu return true;
441d1569537SJianbo Liu
442d1569537SJianbo Liu return false;
443d1569537SJianbo Liu }
444d1569537SJianbo Liu
esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr)445d1569537SJianbo Liu static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
446d1569537SJianbo Liu struct mlx5_esw_flow_attr *esw_attr)
447d1569537SJianbo Liu {
448d1569537SJianbo Liu int i;
449d1569537SJianbo Liu
450d1569537SJianbo Liu if (!esw->offloads.ft_ipsec_tx_pol)
451d1569537SJianbo Liu return true;
452d1569537SJianbo Liu
453d1569537SJianbo Liu for (i = 0; i < esw_attr->split_count; i++)
454d1569537SJianbo Liu if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
455d1569537SJianbo Liu return false;
456d1569537SJianbo Liu
457d1569537SJianbo Liu for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
458d1569537SJianbo Liu if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
459d1569537SJianbo Liu (esw_attr->out_count - esw_attr->split_count > 1))
460d1569537SJianbo Liu return false;
461d1569537SJianbo Liu
462d1569537SJianbo Liu return true;
463d1569537SJianbo Liu }
464d1569537SJianbo Liu
/* Fill dest[dest_idx] as a VPORT destination for esw_attr->dests[attr_idx],
 * including cross-VHCA addressing on merged eswitch and per-destination
 * packet reformat when an encap is attached.
 */
static void
esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
			 int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* Dest vport may live on a different VHCA; address it explicitly. */
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		/* Multi-port eswitch (MPESW) uses the dedicated UPLINK dest type. */
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_is_mpesw(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		/* The reformat id also rides on the destination itself. */
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}
4899e51c0a6SVlad Buslov
490d1569537SJianbo Liu static void
esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr,int attr_idx,int dest_idx,bool pkt_reformat)491d1569537SJianbo Liu esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
492d1569537SJianbo Liu struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
493d1569537SJianbo Liu int attr_idx, int dest_idx, bool pkt_reformat)
494d1569537SJianbo Liu {
495d1569537SJianbo Liu dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
496d1569537SJianbo Liu dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
497d1569537SJianbo Liu if (pkt_reformat &&
498d1569537SJianbo Liu esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
499d1569537SJianbo Liu flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
500d1569537SJianbo Liu flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
501d1569537SJianbo Liu }
502d1569537SJianbo Liu }
503d1569537SJianbo Liu
504d1569537SJianbo Liu static void
esw_setup_vport_dest(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr,int attr_idx,int dest_idx,bool pkt_reformat)505d1569537SJianbo Liu esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
506d1569537SJianbo Liu struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
507d1569537SJianbo Liu int attr_idx, int dest_idx, bool pkt_reformat)
508d1569537SJianbo Liu {
509d1569537SJianbo Liu if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
510d1569537SJianbo Liu esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
511d1569537SJianbo Liu attr_idx, dest_idx, pkt_reformat);
512d1569537SJianbo Liu else
513d1569537SJianbo Liu esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
514d1569537SJianbo Liu attr_idx, dest_idx, pkt_reformat);
515d1569537SJianbo Liu }
516d1569537SJianbo Liu
5179e51c0a6SVlad Buslov static int
esw_setup_vport_dests(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,struct mlx5_esw_flow_attr * esw_attr,int i)5189e51c0a6SVlad Buslov esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
5199e51c0a6SVlad Buslov struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
5209e51c0a6SVlad Buslov int i)
5219e51c0a6SVlad Buslov {
5229e51c0a6SVlad Buslov int j;
5239e51c0a6SVlad Buslov
5249e51c0a6SVlad Buslov for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
5259e51c0a6SVlad Buslov esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
5269e51c0a6SVlad Buslov return i;
5279e51c0a6SVlad Buslov }
5289e51c0a6SVlad Buslov
529e929e3daSMaor Dickman static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch * esw)530e929e3daSMaor Dickman esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
531e929e3daSMaor Dickman {
532e929e3daSMaor Dickman return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
533e929e3daSMaor Dickman mlx5_eswitch_vport_match_metadata_enabled(esw) &&
534e929e3daSMaor Dickman MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
535e929e3daSMaor Dickman }
536e929e3daSMaor Dickman
/* Return true if @dests mixes "internal" and "external" vport
 * destinations, which firmware cannot combine in one rule.
 */
static bool
esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
	bool internal_dest = false, external_dest = false;
	int i;

	for (i = 0; i < max_dest; i++) {
		/* Only vport/uplink destinations participate in the check. */
		if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
			continue;

		/* Uplink dest is external, but considered as internal
		 * if there is reformat because firmware uses LB+hairpin to support it.
		 */
		if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
		    !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
			external_dest = true;
		else
			internal_dest = true;

		/* Mixed internal + external => unsupported combination. */
		if (internal_dest && external_dest)
			return true;
	}

	return false;
}
563e0e22d59SJianbo Liu
/* Translate flow attributes into the HW destination array for the FWD
 * action. Exactly one destination "shape" is selected, in priority order:
 * slow path, sampler, accept, MTU meter, indirect table, chain-based
 * source-port rewrite, or plain vport destinations (optionally followed by
 * a flow-table or chain destination). *i is advanced by the number of
 * dest[] entries written. Returns 0 or a negative errno.
 */
5649e51c0a6SVlad Buslov static int
esw_setup_dests(struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr,struct mlx5_flow_spec * spec,int * i)5659e51c0a6SVlad Buslov esw_setup_dests(struct mlx5_flow_destination *dest,
5669e51c0a6SVlad Buslov struct mlx5_flow_act *flow_act,
5679e51c0a6SVlad Buslov struct mlx5_eswitch *esw,
5689e51c0a6SVlad Buslov struct mlx5_flow_attr *attr,
56910742efcSVlad Buslov struct mlx5_flow_spec *spec,
5709e51c0a6SVlad Buslov int *i)
5719e51c0a6SVlad Buslov {
5729e51c0a6SVlad Buslov struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
5739e51c0a6SVlad Buslov struct mlx5_fs_chains *chains = esw_chains(esw);
5749e51c0a6SVlad Buslov int err = 0;
5759e51c0a6SVlad Buslov
/* Flag src-rewrite only when a termination table is not needed and the
 * device supports it; later helpers consult this attr flag.
 */
57610742efcSVlad Buslov if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
577e929e3daSMaor Dickman esw_src_port_rewrite_supported(esw))
578e5d4e1daSRoi Dayan attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
57910742efcSVlad Buslov
58042760d95SRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
581c0063a43SVlad Buslov esw_setup_slow_path_dest(dest, flow_act, esw, *i);
582c0063a43SVlad Buslov (*i)++;
58342760d95SRoi Dayan goto out;
58442760d95SRoi Dayan }
58542760d95SRoi Dayan
58642760d95SRoi Dayan if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
58742760d95SRoi Dayan esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
58842760d95SRoi Dayan (*i)++;
589c0063a43SVlad Buslov } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
590c0063a43SVlad Buslov esw_setup_accept_dest(dest, flow_act, chains, *i);
5919e51c0a6SVlad Buslov (*i)++;
5926fda078dSOz Shlomo } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
5936fda078dSOz Shlomo err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
5946fda078dSOz Shlomo (*i)++;
595a508728aSVlad Buslov } else if (esw_is_indir_table(esw, attr)) {
596d602be22SRoi Dayan err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
59710742efcSVlad Buslov } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
59810742efcSVlad Buslov err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
5999e51c0a6SVlad Buslov } else {
/* Default: one dest per vport, plus an optional trailing flow-table
 * or chain destination.
 */
6009e51c0a6SVlad Buslov *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
6018c9cc1ebSRoi Dayan
6028c9cc1ebSRoi Dayan if (attr->dest_ft) {
603521933cdSMaor Dickman err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
6048c9cc1ebSRoi Dayan (*i)++;
6058c9cc1ebSRoi Dayan } else if (attr->dest_chain) {
6068c9cc1ebSRoi Dayan err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
6078c9cc1ebSRoi Dayan 1, 0, *i);
6088c9cc1ebSRoi Dayan (*i)++;
6098c9cc1ebSRoi Dayan }
6109e51c0a6SVlad Buslov }
6119e51c0a6SVlad Buslov
61242760d95SRoi Dayan out:
6139e51c0a6SVlad Buslov return err;
6149e51c0a6SVlad Buslov }
6159e51c0a6SVlad Buslov
6169e51c0a6SVlad Buslov static void
esw_cleanup_dests(struct mlx5_eswitch * esw,struct mlx5_flow_attr * attr)6179e51c0a6SVlad Buslov esw_cleanup_dests(struct mlx5_eswitch *esw,
6189e51c0a6SVlad Buslov struct mlx5_flow_attr *attr)
6199e51c0a6SVlad Buslov {
62010742efcSVlad Buslov struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
6219e51c0a6SVlad Buslov struct mlx5_fs_chains *chains = esw_chains(esw);
6229e51c0a6SVlad Buslov
623a508728aSVlad Buslov if (attr->dest_ft) {
624a508728aSVlad Buslov esw_cleanup_decap_indir(esw, attr);
625e5d4e1daSRoi Dayan } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
62610742efcSVlad Buslov if (attr->dest_chain)
6279e51c0a6SVlad Buslov esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
628a508728aSVlad Buslov else if (esw_is_indir_table(esw, attr))
629a508728aSVlad Buslov esw_cleanup_indir_table(esw, attr);
63010742efcSVlad Buslov else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
63110742efcSVlad Buslov esw_cleanup_chain_src_port_rewrite(esw, attr);
63210742efcSVlad Buslov }
6339e51c0a6SVlad Buslov }
6349e51c0a6SVlad Buslov
6359153da46SJianbo Liu static void
esw_setup_meter(struct mlx5_flow_attr * attr,struct mlx5_flow_act * flow_act)6369153da46SJianbo Liu esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
6379153da46SJianbo Liu {
6389153da46SJianbo Liu struct mlx5e_flow_meter_handle *meter;
6399153da46SJianbo Liu
6409153da46SJianbo Liu meter = attr->meter_attr.meter;
6419153da46SJianbo Liu flow_act->exe_aso.type = attr->exe_aso_type;
6429153da46SJianbo Liu flow_act->exe_aso.object_id = meter->obj_id;
6439153da46SJianbo Liu flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
6449153da46SJianbo Liu flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
6459153da46SJianbo Liu /* use metadata reg 5 for packet color */
6469153da46SJianbo Liu flow_act->exe_aso.return_reg_id = 5;
6479153da46SJianbo Liu }
6489153da46SJianbo Liu
/* Install an offloaded FDB rule described by @spec and @attr.
 * Builds the flow_act (VLAN push, decap reformat, mod-header, meter ASO),
 * assembles the destination array via esw_setup_dests(), resolves the
 * target table (per-vport mirror table for split rules, chains table or
 * attr->ft otherwise), and adds the rule - through a termination table
 * when required. Returns the rule handle or an ERR_PTR(); on failure all
 * acquired tables/destinations are released via the goto-cleanup chain.
 */
64974491de9SMark Bloch struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch * esw,struct mlx5_flow_spec * spec,struct mlx5_flow_attr * attr)6503d80d1a2SOr Gerlitz mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
6513d80d1a2SOr Gerlitz struct mlx5_flow_spec *spec,
652c620b772SAriel Levkovich struct mlx5_flow_attr *attr)
6533d80d1a2SOr Gerlitz {
65442f7ad67SPaul Blakey struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
655c620b772SAriel Levkovich struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
656ae430332SAriel Levkovich struct mlx5_fs_chains *chains = esw_chains(esw);
657c620b772SAriel Levkovich bool split = !!(esw_attr->split_count);
658c620b772SAriel Levkovich struct mlx5_vport_tbl_attr fwd_attr;
65940888162SMaor Dickman struct mlx5_flow_destination *dest;
66074491de9SMark Bloch struct mlx5_flow_handle *rule;
661e52c2802SPaul Blakey struct mlx5_flow_table *fdb;
6629e51c0a6SVlad Buslov int i = 0;
6633d80d1a2SOr Gerlitz
664f6455de0SBodong Wang if (esw->mode != MLX5_ESWITCH_OFFLOADS)
6653d80d1a2SOr Gerlitz return ERR_PTR(-EOPNOTSUPP);
6663d80d1a2SOr Gerlitz
667633ad4b2SRoi Dayan if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
668633ad4b2SRoi Dayan return ERR_PTR(-EOPNOTSUPP);
669633ad4b2SRoi Dayan
670d1569537SJianbo Liu if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
671d1569537SJianbo Liu return ERR_PTR(-EOPNOTSUPP);
672d1569537SJianbo Liu
/* +1 leaves room for a trailing counter destination. */
67340888162SMaor Dickman dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
67440888162SMaor Dickman if (!dest)
67540888162SMaor Dickman return ERR_PTR(-ENOMEM);
67640888162SMaor Dickman
6776acfbf38SOr Gerlitz flow_act.action = attr->action;
678633ad4b2SRoi Dayan
679633ad4b2SRoi Dayan if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
680c620b772SAriel Levkovich flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
681c620b772SAriel Levkovich flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
682c620b772SAriel Levkovich flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
683cc495188SJianbo Liu if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
684c620b772SAriel Levkovich flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
685c620b772SAriel Levkovich flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
686c620b772SAriel Levkovich flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
687cc495188SJianbo Liu }
6886acfbf38SOr Gerlitz }
689776b12b6SOr Gerlitz
69010742efcSVlad Buslov mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
69110742efcSVlad Buslov
69266958ed9SHadar Hen Zion if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
6939e51c0a6SVlad Buslov int err;
694e52c2802SPaul Blakey
69510742efcSVlad Buslov err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
6969e51c0a6SVlad Buslov if (err) {
6979e51c0a6SVlad Buslov rule = ERR_PTR(err);
698e52c2802SPaul Blakey goto err_create_goto_table;
699e52c2802SPaul Blakey }
700e0e22d59SJianbo Liu
701e0e22d59SJianbo Liu /* Header rewrite with combined wire+loopback in FDB is not allowed */
702e0e22d59SJianbo Liu if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
703c8d7228dSJianbo Liu esw_dests_to_int_external(dest, i)) {
704e0e22d59SJianbo Liu esw_warn(esw->dev,
705c8d7228dSJianbo Liu "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
706e0e22d59SJianbo Liu rule = ERR_PTR(-EINVAL);
707e0e22d59SJianbo Liu goto err_esw_get;
708e0e22d59SJianbo Liu }
709e52c2802SPaul Blakey }
71014e6b038SEli Cohen
711c620b772SAriel Levkovich if (esw_attr->decap_pkt_reformat)
712c620b772SAriel Levkovich flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
71314e6b038SEli Cohen
71466958ed9SHadar Hen Zion if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
715e37a79e5SMark Bloch dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
716171c7625SMark Bloch dest[i].counter_id = mlx5_fc_id(attr->counter);
717e37a79e5SMark Bloch i++;
7183d80d1a2SOr Gerlitz }
7193d80d1a2SOr Gerlitz
72093b3586eSHuy Nguyen if (attr->outer_match_level != MLX5_MATCH_NONE)
7216363651dSOr Gerlitz spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
72293b3586eSHuy Nguyen if (attr->inner_match_level != MLX5_MATCH_NONE)
723bbd00f7eSHadar Hen Zion spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
7243d80d1a2SOr Gerlitz
725aa24670eSOr Gerlitz if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
7262b688ea5SMaor Gottlieb flow_act.modify_hdr = attr->modify_hdr;
727d7e75a32SOr Gerlitz
7289153da46SJianbo Liu if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
7299153da46SJianbo Liu attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
7309153da46SJianbo Liu esw_setup_meter(attr, &flow_act);
7319153da46SJianbo Liu
/* Split (mirror) rules go to the per-vport mirror table; others go to
 * the chains table for this chain/prio, or directly to attr->ft.
 */
7322741f223SChris Mi if (split) {
733c620b772SAriel Levkovich fwd_attr.chain = attr->chain;
734c620b772SAriel Levkovich fwd_attr.prio = attr->prio;
735c620b772SAriel Levkovich fwd_attr.vport = esw_attr->in_rep->vport;
736c796bb7cSChris Mi fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
737c620b772SAriel Levkovich
7380a9e2307SChris Mi fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
73996e32687SEli Cohen } else {
740d18296ffSPaul Blakey if (attr->chain || attr->prio)
741ae430332SAriel Levkovich fdb = mlx5_chains_get_table(chains, attr->chain,
742d18296ffSPaul Blakey attr->prio, 0);
743d18296ffSPaul Blakey else
744c620b772SAriel Levkovich fdb = attr->ft;
7456fb0701aSPaul Blakey
746e5d4e1daSRoi Dayan if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
747a508728aSVlad Buslov mlx5_eswitch_set_rule_source_port(esw, spec, attr,
748b055ecf5SMark Bloch esw_attr->in_mdev->priv.eswitch,
749b055ecf5SMark Bloch esw_attr->in_rep->vport);
75096e32687SEli Cohen }
751e52c2802SPaul Blakey if (IS_ERR(fdb)) {
752e52c2802SPaul Blakey rule = ERR_CAST(fdb);
753e52c2802SPaul Blakey goto err_esw_get;
754e52c2802SPaul Blakey }
755e52c2802SPaul Blakey
/* No destinations at all: pass a NULL dest array to firmware. */
7565a5624d1SOz Shlomo if (!i) {
7575a5624d1SOz Shlomo kfree(dest);
7585a5624d1SOz Shlomo dest = NULL;
7595a5624d1SOz Shlomo }
7605a5624d1SOz Shlomo
76184be2fdaSEli Cohen if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
762c620b772SAriel Levkovich rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
76310caabdaSOz Shlomo &flow_act, dest, i);
76484be2fdaSEli Cohen else
765e52c2802SPaul Blakey rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
7663d80d1a2SOr Gerlitz if (IS_ERR(rule))
767e52c2802SPaul Blakey goto err_add_rule;
768375f51e2SRoi Dayan else
769525e84beSVlad Buslov atomic64_inc(&esw->offloads.num_flows);
7703d80d1a2SOr Gerlitz
77140888162SMaor Dickman kfree(dest);
772e52c2802SPaul Blakey return rule;
773e52c2802SPaul Blakey
774e52c2802SPaul Blakey err_add_rule:
77596e32687SEli Cohen if (split)
7760a9e2307SChris Mi mlx5_esw_vporttbl_put(esw, &fwd_attr);
777d18296ffSPaul Blakey else if (attr->chain || attr->prio)
778ae430332SAriel Levkovich mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
779e52c2802SPaul Blakey err_esw_get:
7809e51c0a6SVlad Buslov esw_cleanup_dests(esw, attr);
781e52c2802SPaul Blakey err_create_goto_table:
78240888162SMaor Dickman kfree(dest);
783aa0cbbaeSOr Gerlitz return rule;
7843d80d1a2SOr Gerlitz }
7853d80d1a2SOr Gerlitz
/* Install the first half of a split (mirror) rule: mirror destinations for
 * dests[0..split_count) plus a final jump to the per-vport forward table,
 * added in the fast-path FDB for this chain/prio. The second half of the
 * action is applied from the forward table. Returns the rule handle or an
 * ERR_PTR(); acquired tables are released on failure.
 */
786e4ad91f2SChris Mi struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch * esw,struct mlx5_flow_spec * spec,struct mlx5_flow_attr * attr)787e4ad91f2SChris Mi mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
788e4ad91f2SChris Mi struct mlx5_flow_spec *spec,
789c620b772SAriel Levkovich struct mlx5_flow_attr *attr)
790e4ad91f2SChris Mi {
79142f7ad67SPaul Blakey struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
792c620b772SAriel Levkovich struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
793ae430332SAriel Levkovich struct mlx5_fs_chains *chains = esw_chains(esw);
794c620b772SAriel Levkovich struct mlx5_vport_tbl_attr fwd_attr;
79540888162SMaor Dickman struct mlx5_flow_destination *dest;
796e52c2802SPaul Blakey struct mlx5_flow_table *fast_fdb;
797e52c2802SPaul Blakey struct mlx5_flow_table *fwd_fdb;
798e4ad91f2SChris Mi struct mlx5_flow_handle *rule;
79910742efcSVlad Buslov int i, err = 0;
800e4ad91f2SChris Mi
80140888162SMaor Dickman dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
80240888162SMaor Dickman if (!dest)
80340888162SMaor Dickman return ERR_PTR(-ENOMEM);
80440888162SMaor Dickman
805ae430332SAriel Levkovich fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
806e52c2802SPaul Blakey if (IS_ERR(fast_fdb)) {
807e52c2802SPaul Blakey rule = ERR_CAST(fast_fdb);
808e52c2802SPaul Blakey goto err_get_fast;
809e52c2802SPaul Blakey }
810e52c2802SPaul Blakey
/* Per-vport table that holds the forward half of the split action. */
811c620b772SAriel Levkovich fwd_attr.chain = attr->chain;
812c620b772SAriel Levkovich fwd_attr.prio = attr->prio;
813c620b772SAriel Levkovich fwd_attr.vport = esw_attr->in_rep->vport;
814c796bb7cSChris Mi fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
8150a9e2307SChris Mi fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
816e52c2802SPaul Blakey if (IS_ERR(fwd_fdb)) {
817e52c2802SPaul Blakey rule = ERR_CAST(fwd_fdb);
818e52c2802SPaul Blakey goto err_get_fwd;
819e52c2802SPaul Blakey }
820e52c2802SPaul Blakey
821e4ad91f2SChris Mi flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
82210742efcSVlad Buslov for (i = 0; i < esw_attr->split_count; i++) {
8231313d78aSMaor Dickman if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
8241313d78aSMaor Dickman /* Source port rewrite (forward to ovs internal port or statck device) isn't
8251313d78aSMaor Dickman * supported in the rule of split action.
8261313d78aSMaor Dickman */
8271313d78aSMaor Dickman err = -EOPNOTSUPP;
82810742efcSVlad Buslov else
8299e51c0a6SVlad Buslov esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
83010742efcSVlad Buslov
83110742efcSVlad Buslov if (err) {
83210742efcSVlad Buslov rule = ERR_PTR(err);
83310742efcSVlad Buslov goto err_chain_src_rewrite;
83410742efcSVlad Buslov }
83510742efcSVlad Buslov }
/* Last destination: jump to the forward table. */
836e4ad91f2SChris Mi dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
837873d2f12SZheng Yongjun dest[i].ft = fwd_fdb;
838e4ad91f2SChris Mi i++;
839e4ad91f2SChris Mi
840a508728aSVlad Buslov mlx5_eswitch_set_rule_source_port(esw, spec, attr,
841b055ecf5SMark Bloch esw_attr->in_mdev->priv.eswitch,
842b055ecf5SMark Bloch esw_attr->in_rep->vport);
843e4ad91f2SChris Mi
84493b3586eSHuy Nguyen if (attr->outer_match_level != MLX5_MATCH_NONE)
845c01cfd0fSJianbo Liu spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
846e4ad91f2SChris Mi
847278d51f2SPaul Blakey flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
848e52c2802SPaul Blakey rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
849e4ad91f2SChris Mi
85010742efcSVlad Buslov if (IS_ERR(rule)) {
85110742efcSVlad Buslov i = esw_attr->split_count;
85210742efcSVlad Buslov goto err_chain_src_rewrite;
85310742efcSVlad Buslov }
854e52c2802SPaul Blakey
855525e84beSVlad Buslov atomic64_inc(&esw->offloads.num_flows);
856e4ad91f2SChris Mi
85740888162SMaor Dickman kfree(dest);
858e4ad91f2SChris Mi return rule;
85910742efcSVlad Buslov err_chain_src_rewrite:
8600a9e2307SChris Mi mlx5_esw_vporttbl_put(esw, &fwd_attr);
861e52c2802SPaul Blakey err_get_fwd:
862ae430332SAriel Levkovich mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
863e52c2802SPaul Blakey err_get_fast:
86440888162SMaor Dickman kfree(dest);
865e52c2802SPaul Blakey return rule;
866e52c2802SPaul Blakey }
867e52c2802SPaul Blakey
/* Common teardown for offloaded and fwd rules: delete the HW rule, release
 * any termination tables it referenced, decrement the flow counter, and put
 * the tables acquired at add time (per-vport table and/or chains table),
 * mirroring the acquisition order of the corresponding add function.
 * @fwd_rule distinguishes rules created by mlx5_eswitch_add_fwd_rule().
 */
868e52c2802SPaul Blakey static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch * esw,struct mlx5_flow_handle * rule,struct mlx5_flow_attr * attr,bool fwd_rule)869e52c2802SPaul Blakey __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
870e52c2802SPaul Blakey struct mlx5_flow_handle *rule,
871c620b772SAriel Levkovich struct mlx5_flow_attr *attr,
872e52c2802SPaul Blakey bool fwd_rule)
873e52c2802SPaul Blakey {
874c620b772SAriel Levkovich struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
875ae430332SAriel Levkovich struct mlx5_fs_chains *chains = esw_chains(esw);
876c620b772SAriel Levkovich bool split = (esw_attr->split_count > 0);
877c620b772SAriel Levkovich struct mlx5_vport_tbl_attr fwd_attr;
87810caabdaSOz Shlomo int i;
879e52c2802SPaul Blakey
880e52c2802SPaul Blakey mlx5_del_flow_rules(rule);
88110caabdaSOz Shlomo
882e5d4e1daSRoi Dayan if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
88310caabdaSOz Shlomo /* unref the term table */
88410caabdaSOz Shlomo for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
885c620b772SAriel Levkovich if (esw_attr->dests[i].termtbl)
886c620b772SAriel Levkovich mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
88710caabdaSOz Shlomo }
888d8a2034fSEli Cohen }
88910caabdaSOz Shlomo
890525e84beSVlad Buslov atomic64_dec(&esw->offloads.num_flows);
891e52c2802SPaul Blakey
/* fwd_attr is only consumed below when a per-vport table was taken. */
892c620b772SAriel Levkovich if (fwd_rule || split) {
893c620b772SAriel Levkovich fwd_attr.chain = attr->chain;
894c620b772SAriel Levkovich fwd_attr.prio = attr->prio;
895c620b772SAriel Levkovich fwd_attr.vport = esw_attr->in_rep->vport;
896c796bb7cSChris Mi fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
897c620b772SAriel Levkovich }
898c620b772SAriel Levkovich
899e52c2802SPaul Blakey if (fwd_rule) {
9000a9e2307SChris Mi mlx5_esw_vporttbl_put(esw, &fwd_attr);
901ae430332SAriel Levkovich mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
902e52c2802SPaul Blakey } else {
90396e32687SEli Cohen if (split)
9040a9e2307SChris Mi mlx5_esw_vporttbl_put(esw, &fwd_attr);
905d18296ffSPaul Blakey else if (attr->chain || attr->prio)
906ae430332SAriel Levkovich mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
9079e51c0a6SVlad Buslov esw_cleanup_dests(esw, attr);
908e52c2802SPaul Blakey }
909e4ad91f2SChris Mi }
910e4ad91f2SChris Mi
911d85cdccbSOr Gerlitz void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch * esw,struct mlx5_flow_handle * rule,struct mlx5_flow_attr * attr)912d85cdccbSOr Gerlitz mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
913d85cdccbSOr Gerlitz struct mlx5_flow_handle *rule,
914c620b772SAriel Levkovich struct mlx5_flow_attr *attr)
915d85cdccbSOr Gerlitz {
916e52c2802SPaul Blakey __mlx5_eswitch_del_rule(esw, rule, attr, false);
917d85cdccbSOr Gerlitz }
918d85cdccbSOr Gerlitz
91948265006SOr Gerlitz void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch * esw,struct mlx5_flow_handle * rule,struct mlx5_flow_attr * attr)92048265006SOr Gerlitz mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
92148265006SOr Gerlitz struct mlx5_flow_handle *rule,
922c620b772SAriel Levkovich struct mlx5_flow_attr *attr)
92348265006SOr Gerlitz {
924e52c2802SPaul Blakey __mlx5_eswitch_del_rule(esw, rule, attr, true);
92548265006SOr Gerlitz }
92648265006SOr Gerlitz
927f7a68945SMark Bloch struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch * on_esw,struct mlx5_eswitch * from_esw,struct mlx5_eswitch_rep * rep,u32 sqn)9283a46f4fbSMark Bloch mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
929979bf468SMark Bloch struct mlx5_eswitch *from_esw,
9303a46f4fbSMark Bloch struct mlx5_eswitch_rep *rep,
93102f3afd9SParav Pandit u32 sqn)
932ab22be9bSOr Gerlitz {
93366958ed9SHadar Hen Zion struct mlx5_flow_act flow_act = {0};
9344c5009c5SRabie Loulou struct mlx5_flow_destination dest = {};
93574491de9SMark Bloch struct mlx5_flow_handle *flow_rule;
936c5bb1730SMaor Gottlieb struct mlx5_flow_spec *spec;
937ab22be9bSOr Gerlitz void *misc;
93829bcb6e4SRoi Dayan u16 vport;
939ab22be9bSOr Gerlitz
9401b9a07eeSLeon Romanovsky spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
941c5bb1730SMaor Gottlieb if (!spec) {
942ab22be9bSOr Gerlitz flow_rule = ERR_PTR(-ENOMEM);
943ab22be9bSOr Gerlitz goto out;
944ab22be9bSOr Gerlitz }
945ab22be9bSOr Gerlitz
946c5bb1730SMaor Gottlieb misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
947ab22be9bSOr Gerlitz MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
94829bcb6e4SRoi Dayan
94929bcb6e4SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
95029bcb6e4SRoi Dayan MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
95129bcb6e4SRoi Dayan
95229bcb6e4SRoi Dayan spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
95329bcb6e4SRoi Dayan
954a1b3839aSBodong Wang /* source vport is the esw manager */
95529bcb6e4SRoi Dayan vport = from_esw->manager_vport;
95629bcb6e4SRoi Dayan
95729bcb6e4SRoi Dayan if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
95829bcb6e4SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
95929bcb6e4SRoi Dayan MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
96029bcb6e4SRoi Dayan mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));
96129bcb6e4SRoi Dayan
96229bcb6e4SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
96329bcb6e4SRoi Dayan MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
96429bcb6e4SRoi Dayan mlx5_eswitch_get_vport_metadata_mask());
96529bcb6e4SRoi Dayan
96629bcb6e4SRoi Dayan spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
96729bcb6e4SRoi Dayan } else {
96829bcb6e4SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
96929bcb6e4SRoi Dayan MLX5_SET(fte_match_set_misc, misc, source_port, vport);
97029bcb6e4SRoi Dayan
9713a46f4fbSMark Bloch if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
9727d97822aSMark Bloch MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
973979bf468SMark Bloch MLX5_CAP_GEN(from_esw->dev, vhca_id));
974ab22be9bSOr Gerlitz
975c5bb1730SMaor Gottlieb misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
976ab22be9bSOr Gerlitz MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
97729bcb6e4SRoi Dayan
9783a46f4fbSMark Bloch if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
9797d97822aSMark Bloch MLX5_SET_TO_ONES(fte_match_set_misc, misc,
9807d97822aSMark Bloch source_eswitch_owner_vhca_id);
981ab22be9bSOr Gerlitz
98229bcb6e4SRoi Dayan spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
98329bcb6e4SRoi Dayan }
98429bcb6e4SRoi Dayan
985ab22be9bSOr Gerlitz dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
9863a46f4fbSMark Bloch dest.vport.num = rep->vport;
9873a46f4fbSMark Bloch dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
9883a46f4fbSMark Bloch dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
98966958ed9SHadar Hen Zion flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
990ab22be9bSOr Gerlitz
991f676cc90SJianbo Liu if (rep->vport == MLX5_VPORT_UPLINK &&
992f676cc90SJianbo Liu on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
993c6c2bf5dSJianbo Liu dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
994c6c2bf5dSJianbo Liu flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
995c6c2bf5dSJianbo Liu dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
996c6c2bf5dSJianbo Liu } else {
997c6c2bf5dSJianbo Liu dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
998c6c2bf5dSJianbo Liu dest.vport.num = rep->vport;
999c6c2bf5dSJianbo Liu dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
1000c6c2bf5dSJianbo Liu dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1001c6c2bf5dSJianbo Liu }
1002c6c2bf5dSJianbo Liu
10031bf8b0daSRoi Dayan if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
10041bf8b0daSRoi Dayan rep->vport == MLX5_VPORT_UPLINK)
1005d0444254SAriel Levkovich spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
1006d0444254SAriel Levkovich
1007dcf19b9cSMaor Dickman flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
100839ac237cSPaul Blakey spec, &flow_act, &dest, 1);
1009ab22be9bSOr Gerlitz if (IS_ERR(flow_rule))
10103a46f4fbSMark Bloch esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
10113a46f4fbSMark Bloch PTR_ERR(flow_rule));
1012ab22be9bSOr Gerlitz out:
1013c5bb1730SMaor Gottlieb kvfree(spec);
1014ab22be9bSOr Gerlitz return flow_rule;
1015ab22be9bSOr Gerlitz }
101657cbd893SMark Bloch EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
1017ab22be9bSOr Gerlitz
/* Counterpart of mlx5_eswitch_add_send_to_vport_rule(). */
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
1022159fe639SMark Bloch
/* Counterpart of mlx5_eswitch_add_send_to_vport_meta_rule(); tolerates a
 * NULL rule so callers can delete unconditionally.
 */
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (!rule)
		return;

	mlx5_del_flow_rules(rule);
}
1028f019679eSChris Mi
/* Install a slow-path FDB rule that matches on the vport metadata in
 * reg_c_0 and the slow-table goto-vport mark in reg_c_1, and forwards the
 * packet to @vport_num. Returns the rule handle or an ERR_PTR().
 */
1029430e2d5eSRoi Dayan struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch * esw,u16 vport_num)1030430e2d5eSRoi Dayan mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
10318e404fefSVlad Buslov {
10328e404fefSVlad Buslov struct mlx5_flow_destination dest = {};
10338e404fefSVlad Buslov struct mlx5_flow_act flow_act = {0};
10348e404fefSVlad Buslov struct mlx5_flow_handle *flow_rule;
10358e404fefSVlad Buslov struct mlx5_flow_spec *spec;
10368e404fefSVlad Buslov
10378e404fefSVlad Buslov spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1038430e2d5eSRoi Dayan if (!spec)
1039430e2d5eSRoi Dayan return ERR_PTR(-ENOMEM);
10408e404fefSVlad Buslov
/* Criteria: full vport-metadata mask in reg_c_0, tunnel mask in reg_c_1;
 * value: the slow-table goto-vport mark.
 */
10418e404fefSVlad Buslov MLX5_SET(fte_match_param, spec->match_criteria,
10428e404fefSVlad Buslov misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
10438e404fefSVlad Buslov MLX5_SET(fte_match_param, spec->match_criteria,
10448e404fefSVlad Buslov misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
10458e404fefSVlad Buslov MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
10468e404fefSVlad Buslov ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
10478e404fefSVlad Buslov
10488e404fefSVlad Buslov spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
10498e404fefSVlad Buslov dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
10508e404fefSVlad Buslov flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
10518e404fefSVlad Buslov
10528e404fefSVlad Buslov MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
10538e404fefSVlad Buslov mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
10548e404fefSVlad Buslov dest.vport.num = vport_num;
10558e404fefSVlad Buslov
1056dcf19b9cSMaor Dickman flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
10578e404fefSVlad Buslov spec, &flow_act, &dest, 1);
1058430e2d5eSRoi Dayan if (IS_ERR(flow_rule))
1059430e2d5eSRoi Dayan esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
1060430e2d5eSRoi Dayan vport_num, PTR_ERR(flow_rule));
10618e404fefSVlad Buslov
10628e404fefSVlad Buslov kvfree(spec);
1063430e2d5eSRoi Dayan return flow_rule;
10648e404fefSVlad Buslov }
10658e404fefSVlad Buslov
mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch * esw)10665b7cb745SPaul Blakey static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
10675b7cb745SPaul Blakey {
10685b7cb745SPaul Blakey return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
10695b7cb745SPaul Blakey MLX5_FDB_TO_VPORT_REG_C_1;
10705b7cb745SPaul Blakey }
10715b7cb745SPaul Blakey
/* Enable/disable passing of reg_c_0 (and, when supported, reg_c_1) from
 * the FDB to the vport via the ESW vport context. Reads the current
 * fdb_to_vport_reg_c_id bits, sets or clears the wanted bits, writes them
 * back, and mirrors the reg_c_1 state into esw->flags. Returns 0 or a
 * firmware command error.
 */
esw_set_passing_vport_metadata(struct mlx5_eswitch * esw,bool enable)1072332bd3a5SParav Pandit static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
1073c1286050SJianbo Liu {
1074c1286050SJianbo Liu u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
1075e08a6832SLeon Romanovsky u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
1076e08a6832SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
10775b7cb745SPaul Blakey u8 curr, wanted;
1078c1286050SJianbo Liu int err;
1079c1286050SJianbo Liu
/* Nothing to do when neither loopback nor metadata matching is in use. */
10805b7cb745SPaul Blakey if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
10815b7cb745SPaul Blakey !mlx5_eswitch_vport_match_metadata_enabled(esw))
1082332bd3a5SParav Pandit return 0;
1083332bd3a5SParav Pandit
1084e08a6832SLeon Romanovsky MLX5_SET(query_esw_vport_context_in, in, opcode,
1085e08a6832SLeon Romanovsky MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
1086e08a6832SLeon Romanovsky err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
1087c1286050SJianbo Liu if (err)
1088c1286050SJianbo Liu return err;
1089c1286050SJianbo Liu
10905b7cb745SPaul Blakey curr = MLX5_GET(query_esw_vport_context_out, out,
1091c1286050SJianbo Liu esw_vport_context.fdb_to_vport_reg_c_id);
10925b7cb745SPaul Blakey wanted = MLX5_FDB_TO_VPORT_REG_C_0;
10935b7cb745SPaul Blakey if (mlx5_eswitch_reg_c1_loopback_supported(esw))
10945b7cb745SPaul Blakey wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
1095c1286050SJianbo Liu
1096332bd3a5SParav Pandit if (enable)
10975b7cb745SPaul Blakey curr |= wanted;
1098332bd3a5SParav Pandit else
10995b7cb745SPaul Blakey curr &= ~wanted;
1100c1286050SJianbo Liu
1101e08a6832SLeon Romanovsky MLX5_SET(modify_esw_vport_context_in, min,
11025b7cb745SPaul Blakey esw_vport_context.fdb_to_vport_reg_c_id, curr);
1103e08a6832SLeon Romanovsky MLX5_SET(modify_esw_vport_context_in, min,
1104c1286050SJianbo Liu field_select.fdb_to_vport_reg_c_id, 1);
1105c1286050SJianbo Liu
1106e08a6832SLeon Romanovsky err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
/* Track reg_c_1 loopback state in the eswitch flags on success. */
11075b7cb745SPaul Blakey if (!err) {
11085b7cb745SPaul Blakey if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
11095b7cb745SPaul Blakey esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
11105b7cb745SPaul Blakey else
11115b7cb745SPaul Blakey esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
11125b7cb745SPaul Blakey }
11135b7cb745SPaul Blakey
11145b7cb745SPaul Blakey return err;
1115c1286050SJianbo Liu }
1116c1286050SJianbo Liu
/* Prepare the shared match spec and destination for peer-miss rules:
 * match traffic coming from @peer_dev (by vport metadata when enabled,
 * otherwise by source port + owner vhca id) and forward it to the peer's
 * eswitch manager vport.
 */
peer_miss_rules_setup(struct mlx5_eswitch * esw,struct mlx5_core_dev * peer_dev,struct mlx5_flow_spec * spec,struct mlx5_flow_destination * dest)1117a5641cb5SJianbo Liu static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
1118a5641cb5SJianbo Liu struct mlx5_core_dev *peer_dev,
1119ac004b83SRoi Dayan struct mlx5_flow_spec *spec,
1120ac004b83SRoi Dayan struct mlx5_flow_destination *dest)
1121ac004b83SRoi Dayan {
1122a5641cb5SJianbo Liu void *misc;
1123a5641cb5SJianbo Liu
1124a5641cb5SJianbo Liu if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
/* Metadata mode: match the full vport metadata mask in reg_c_0. */
1125a5641cb5SJianbo Liu misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1126a5641cb5SJianbo Liu misc_parameters_2);
11270f0d3827SPaul Blakey MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
11280f0d3827SPaul Blakey mlx5_eswitch_get_vport_metadata_mask());
1129a5641cb5SJianbo Liu
1130a5641cb5SJianbo Liu spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1131a5641cb5SJianbo Liu } else {
/* Legacy mode: match source port plus the peer's owner vhca id. */
1132a5641cb5SJianbo Liu misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1133ac004b83SRoi Dayan misc_parameters);
1134ac004b83SRoi Dayan
1135ac004b83SRoi Dayan MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
1136ac004b83SRoi Dayan MLX5_CAP_GEN(peer_dev, vhca_id));
1137ac004b83SRoi Dayan
1138ac004b83SRoi Dayan spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1139ac004b83SRoi Dayan
1140ac004b83SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1141ac004b83SRoi Dayan misc_parameters);
1142ac004b83SRoi Dayan MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1143ac004b83SRoi Dayan MLX5_SET_TO_ONES(fte_match_set_misc, misc,
1144ac004b83SRoi Dayan source_eswitch_owner_vhca_id);
1145a5641cb5SJianbo Liu }
1146ac004b83SRoi Dayan
1147ac004b83SRoi Dayan dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1148a1b3839aSBodong Wang dest->vport.num = peer_dev->priv.eswitch->manager_vport;
1149ac004b83SRoi Dayan dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
115004de7ddaSRoi Dayan dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1151ac004b83SRoi Dayan }
1152ac004b83SRoi Dayan
/* Set the per-vport match *value* (the source vport identity) for a peer
 * miss rule, in whichever representation the e-switch matches on:
 * metadata in reg_c_0, or the raw source_port field in legacy mode.
 */
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc_params;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* Legacy mode: match directly on the source vport number. */
		misc_params = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);
		MLX5_SET(fte_match_set_misc, misc_params, source_port, vport);
		return;
	}

	/* Metadata mode: match the peer's metadata value for this vport. */
	misc_params = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				   misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc_params, metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(peer_esw, vport));
}
1172a5641cb5SJianbo Liu
esw_add_fdb_peer_miss_rules(struct mlx5_eswitch * esw,struct mlx5_core_dev * peer_dev)1173ac004b83SRoi Dayan static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1174ac004b83SRoi Dayan struct mlx5_core_dev *peer_dev)
1175ac004b83SRoi Dayan {
1176ac004b83SRoi Dayan struct mlx5_flow_destination dest = {};
1177ac004b83SRoi Dayan struct mlx5_flow_act flow_act = {0};
1178ac004b83SRoi Dayan struct mlx5_flow_handle **flows;
1179ac004b83SRoi Dayan /* total vports is the same for both e-switches */
1180ac004b83SRoi Dayan int nvports = esw->total_vports;
118147dd7e60SParav Pandit struct mlx5_flow_handle *flow;
118247dd7e60SParav Pandit struct mlx5_flow_spec *spec;
118347dd7e60SParav Pandit struct mlx5_vport *vport;
1184595d51b2SJianbo Liu int err, pfindex;
118547dd7e60SParav Pandit unsigned long i;
1186ac004b83SRoi Dayan void *misc;
1187ac004b83SRoi Dayan
11881552e9b5SRoi Dayan if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
11891552e9b5SRoi Dayan return 0;
11901552e9b5SRoi Dayan
1191ac004b83SRoi Dayan spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1192ac004b83SRoi Dayan if (!spec)
1193ac004b83SRoi Dayan return -ENOMEM;
1194ac004b83SRoi Dayan
1195a5641cb5SJianbo Liu peer_miss_rules_setup(esw, peer_dev, spec, &dest);
1196ac004b83SRoi Dayan
1197806bf340SGustavo A. R. Silva flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
1198ac004b83SRoi Dayan if (!flows) {
1199ac004b83SRoi Dayan err = -ENOMEM;
1200ac004b83SRoi Dayan goto alloc_flows_err;
1201ac004b83SRoi Dayan }
1202ac004b83SRoi Dayan
1203ac004b83SRoi Dayan flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1204ac004b83SRoi Dayan misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1205ac004b83SRoi Dayan misc_parameters);
1206ac004b83SRoi Dayan
120781cd229cSBodong Wang if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
120847dd7e60SParav Pandit vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1209a5641cb5SJianbo Liu esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1210a5641cb5SJianbo Liu spec, MLX5_VPORT_PF);
1211a5641cb5SJianbo Liu
1212dcf19b9cSMaor Dickman flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
121381cd229cSBodong Wang spec, &flow_act, &dest, 1);
121481cd229cSBodong Wang if (IS_ERR(flow)) {
121581cd229cSBodong Wang err = PTR_ERR(flow);
121681cd229cSBodong Wang goto add_pf_flow_err;
121781cd229cSBodong Wang }
121847dd7e60SParav Pandit flows[vport->index] = flow;
121981cd229cSBodong Wang }
122081cd229cSBodong Wang
122181cd229cSBodong Wang if (mlx5_ecpf_vport_exists(esw->dev)) {
122247dd7e60SParav Pandit vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
122381cd229cSBodong Wang MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
1224dcf19b9cSMaor Dickman flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
122581cd229cSBodong Wang spec, &flow_act, &dest, 1);
122681cd229cSBodong Wang if (IS_ERR(flow)) {
122781cd229cSBodong Wang err = PTR_ERR(flow);
122881cd229cSBodong Wang goto add_ecpf_flow_err;
122981cd229cSBodong Wang }
123047dd7e60SParav Pandit flows[vport->index] = flow;
123181cd229cSBodong Wang }
123281cd229cSBodong Wang
123347dd7e60SParav Pandit mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1234a5641cb5SJianbo Liu esw_set_peer_miss_rule_source_port(esw,
1235a5641cb5SJianbo Liu peer_dev->priv.eswitch,
123647dd7e60SParav Pandit spec, vport->vport);
1237a5641cb5SJianbo Liu
1238dcf19b9cSMaor Dickman flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1239ac004b83SRoi Dayan spec, &flow_act, &dest, 1);
1240ac004b83SRoi Dayan if (IS_ERR(flow)) {
1241ac004b83SRoi Dayan err = PTR_ERR(flow);
124281cd229cSBodong Wang goto add_vf_flow_err;
1243ac004b83SRoi Dayan }
124447dd7e60SParav Pandit flows[vport->index] = flow;
1245ac004b83SRoi Dayan }
1246ac004b83SRoi Dayan
1247fa3c73eeSDaniel Jurgens if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1248fa3c73eeSDaniel Jurgens mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1249fa3c73eeSDaniel Jurgens if (i >= mlx5_core_max_ec_vfs(peer_dev))
1250fa3c73eeSDaniel Jurgens break;
1251fa3c73eeSDaniel Jurgens esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1252fa3c73eeSDaniel Jurgens spec, vport->vport);
1253fa3c73eeSDaniel Jurgens flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1254fa3c73eeSDaniel Jurgens spec, &flow_act, &dest, 1);
1255fa3c73eeSDaniel Jurgens if (IS_ERR(flow)) {
1256fa3c73eeSDaniel Jurgens err = PTR_ERR(flow);
1257fa3c73eeSDaniel Jurgens goto add_ec_vf_flow_err;
1258fa3c73eeSDaniel Jurgens }
1259fa3c73eeSDaniel Jurgens flows[vport->index] = flow;
1260fa3c73eeSDaniel Jurgens }
1261fa3c73eeSDaniel Jurgens }
1262595d51b2SJianbo Liu
1263595d51b2SJianbo Liu pfindex = mlx5_get_dev_index(peer_dev);
1264595d51b2SJianbo Liu if (pfindex >= MLX5_MAX_PORTS) {
1265595d51b2SJianbo Liu esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
1266595d51b2SJianbo Liu pfindex, MLX5_MAX_PORTS);
1267595d51b2SJianbo Liu err = -EINVAL;
1268595d51b2SJianbo Liu goto add_ec_vf_flow_err;
1269595d51b2SJianbo Liu }
1270595d51b2SJianbo Liu esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
1271ac004b83SRoi Dayan
1272ac004b83SRoi Dayan kvfree(spec);
1273ac004b83SRoi Dayan return 0;
1274ac004b83SRoi Dayan
1275fa3c73eeSDaniel Jurgens add_ec_vf_flow_err:
1276fa3c73eeSDaniel Jurgens mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1277fa3c73eeSDaniel Jurgens if (!flows[vport->index])
1278fa3c73eeSDaniel Jurgens continue;
1279fa3c73eeSDaniel Jurgens mlx5_del_flow_rules(flows[vport->index]);
1280fa3c73eeSDaniel Jurgens }
128181cd229cSBodong Wang add_vf_flow_err:
128247dd7e60SParav Pandit mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
128347dd7e60SParav Pandit if (!flows[vport->index])
128447dd7e60SParav Pandit continue;
128547dd7e60SParav Pandit mlx5_del_flow_rules(flows[vport->index]);
128647dd7e60SParav Pandit }
128747dd7e60SParav Pandit if (mlx5_ecpf_vport_exists(esw->dev)) {
128847dd7e60SParav Pandit vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
128947dd7e60SParav Pandit mlx5_del_flow_rules(flows[vport->index]);
129047dd7e60SParav Pandit }
129181cd229cSBodong Wang add_ecpf_flow_err:
129247dd7e60SParav Pandit if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
129347dd7e60SParav Pandit vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
129447dd7e60SParav Pandit mlx5_del_flow_rules(flows[vport->index]);
129547dd7e60SParav Pandit }
129681cd229cSBodong Wang add_pf_flow_err:
129781cd229cSBodong Wang esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
1298ac004b83SRoi Dayan kvfree(flows);
1299ac004b83SRoi Dayan alloc_flows_err:
1300ac004b83SRoi Dayan kvfree(spec);
1301ac004b83SRoi Dayan return err;
1302ac004b83SRoi Dayan }
1303ac004b83SRoi Dayan
/* Remove the peer miss rules installed by esw_add_fdb_peer_miss_rules()
 * for @peer_dev and free the per-peer flow handle array. No-op if no
 * rules are recorded for this peer's index.
 */
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
					struct mlx5_core_dev *peer_dev)
{
	u16 peer_index = mlx5_get_dev_index(peer_dev);
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
	if (!flows)
		return;

	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
			/* The flow for a particular vport could be NULL if the other ECPF
			 * has fewer or no VFs enabled
			 */
			if (!flows[vport->index])
				continue;
			mlx5_del_flow_rules(flows[vport->index]);
		}
	}

	/* VF rules are always present when the array exists. */
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	kvfree(flows);
	/* Clear the slot so a repeated call for this peer is a no-op. */
	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}
1343ac004b83SRoi Dayan
/* Install the slow-path FDB miss rules: packets that miss all offloaded
 * rules are forwarded to the e-switch manager vport. Two rules are used,
 * distinguished by the multicast bit of the destination MAC (dmac[0] & 1):
 * one for unicast and one for multicast traffic.
 *
 * Returns 0 on success or a negative errno.
 */
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Criteria: match only the multicast/unicast bit of the dmac. */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	/* Unicast rule: match value for the bit is 0 (spec was zeroed). */
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev,  "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	/* Multicast rule: reuse the same spec with the bit's value set to 1. */
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		/* Roll back the already-installed unicast rule. */
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
14033aa33572SOr Gerlitz
140411b717d6SPaul Blakey struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch * esw,u32 tag)140511b717d6SPaul Blakey esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
140611b717d6SPaul Blakey {
140711b717d6SPaul Blakey struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
140811b717d6SPaul Blakey struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
140911b717d6SPaul Blakey struct mlx5_flow_context *flow_context;
141011b717d6SPaul Blakey struct mlx5_flow_handle *flow_rule;
141111b717d6SPaul Blakey struct mlx5_flow_destination dest;
141211b717d6SPaul Blakey struct mlx5_flow_spec *spec;
141311b717d6SPaul Blakey void *misc;
141411b717d6SPaul Blakey
141560acc105SPaul Blakey if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
141660acc105SPaul Blakey return ERR_PTR(-EOPNOTSUPP);
141760acc105SPaul Blakey
14189f4d9283SRoi Dayan spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
141911b717d6SPaul Blakey if (!spec)
142011b717d6SPaul Blakey return ERR_PTR(-ENOMEM);
142111b717d6SPaul Blakey
142211b717d6SPaul Blakey misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
142311b717d6SPaul Blakey misc_parameters_2);
142411b717d6SPaul Blakey MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1425a91d98a0SChris Mi ESW_REG_C0_USER_DATA_METADATA_MASK);
142611b717d6SPaul Blakey misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
142711b717d6SPaul Blakey misc_parameters_2);
142811b717d6SPaul Blakey MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
142911b717d6SPaul Blakey spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
14306724e66bSPaul Blakey flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
14316724e66bSPaul Blakey MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
14326724e66bSPaul Blakey flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
143311b717d6SPaul Blakey
143411b717d6SPaul Blakey flow_context = &spec->flow_context;
143511b717d6SPaul Blakey flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
143611b717d6SPaul Blakey flow_context->flow_tag = tag;
143711b717d6SPaul Blakey dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
143811b717d6SPaul Blakey dest.ft = esw->offloads.ft_offloads;
143911b717d6SPaul Blakey
144011b717d6SPaul Blakey flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
14419f4d9283SRoi Dayan kvfree(spec);
144211b717d6SPaul Blakey
144311b717d6SPaul Blakey if (IS_ERR(flow_rule))
144411b717d6SPaul Blakey esw_warn(esw->dev,
144511b717d6SPaul Blakey "Failed to create restore rule for tag: %d, err(%d)\n",
144611b717d6SPaul Blakey tag, (int)PTR_ERR(flow_rule));
144711b717d6SPaul Blakey
144811b717d6SPaul Blakey return flow_rule;
144911b717d6SPaul Blakey }
145011b717d6SPaul Blakey
14511967ce6eSOr Gerlitz #define MAX_PF_SQ 256
1452cd3d07e7SMark Bloch #define MAX_SQ_NVPORTS 32
14531967ce6eSOr Gerlitz
14547eb197fdSRoi Dayan void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch * esw,u32 * flow_group_in,int match_params)14557eb197fdSRoi Dayan mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
145629bcb6e4SRoi Dayan u32 *flow_group_in,
145729bcb6e4SRoi Dayan int match_params)
1458a5641cb5SJianbo Liu {
1459a5641cb5SJianbo Liu void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1460a5641cb5SJianbo Liu flow_group_in,
1461a5641cb5SJianbo Liu match_criteria);
1462a5641cb5SJianbo Liu
1463a5641cb5SJianbo Liu if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1464a5641cb5SJianbo Liu MLX5_SET(create_flow_group_in, flow_group_in,
1465a5641cb5SJianbo Liu match_criteria_enable,
146629bcb6e4SRoi Dayan MLX5_MATCH_MISC_PARAMETERS_2 | match_params);
1467a5641cb5SJianbo Liu
14680f0d3827SPaul Blakey MLX5_SET(fte_match_param, match_criteria,
14690f0d3827SPaul Blakey misc_parameters_2.metadata_reg_c_0,
14700f0d3827SPaul Blakey mlx5_eswitch_get_vport_metadata_mask());
1471a5641cb5SJianbo Liu } else {
1472a5641cb5SJianbo Liu MLX5_SET(create_flow_group_in, flow_group_in,
1473a5641cb5SJianbo Liu match_criteria_enable,
147429bcb6e4SRoi Dayan MLX5_MATCH_MISC_PARAMETERS | match_params);
1475a5641cb5SJianbo Liu
1476a5641cb5SJianbo Liu MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1477a5641cb5SJianbo Liu misc_parameters.source_port);
1478a5641cb5SJianbo Liu }
1479a5641cb5SJianbo Liu }
1480a5641cb5SJianbo Liu
1481ae430332SAriel Levkovich #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
esw_vport_tbl_put(struct mlx5_eswitch * esw)14820a9e2307SChris Mi static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
14834c7f4028SChris Mi {
14844c7f4028SChris Mi struct mlx5_vport_tbl_attr attr;
14854c7f4028SChris Mi struct mlx5_vport *vport;
148647dd7e60SParav Pandit unsigned long i;
14874c7f4028SChris Mi
14884c7f4028SChris Mi attr.chain = 0;
14894c7f4028SChris Mi attr.prio = 1;
149047dd7e60SParav Pandit mlx5_esw_for_each_vport(esw, i, vport) {
14914c7f4028SChris Mi attr.vport = vport->vport;
1492c796bb7cSChris Mi attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
14930a9e2307SChris Mi mlx5_esw_vporttbl_put(esw, &attr);
14944c7f4028SChris Mi }
14954c7f4028SChris Mi }
14964c7f4028SChris Mi
esw_vport_tbl_get(struct mlx5_eswitch * esw)14970a9e2307SChris Mi static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
14984c7f4028SChris Mi {
14994c7f4028SChris Mi struct mlx5_vport_tbl_attr attr;
15004c7f4028SChris Mi struct mlx5_flow_table *fdb;
15014c7f4028SChris Mi struct mlx5_vport *vport;
150247dd7e60SParav Pandit unsigned long i;
15034c7f4028SChris Mi
15044c7f4028SChris Mi attr.chain = 0;
15054c7f4028SChris Mi attr.prio = 1;
150647dd7e60SParav Pandit mlx5_esw_for_each_vport(esw, i, vport) {
15074c7f4028SChris Mi attr.vport = vport->vport;
1508c796bb7cSChris Mi attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
15090a9e2307SChris Mi fdb = mlx5_esw_vporttbl_get(esw, &attr);
15104c7f4028SChris Mi if (IS_ERR(fdb))
15114c7f4028SChris Mi goto out;
15124c7f4028SChris Mi }
15134c7f4028SChris Mi return 0;
15144c7f4028SChris Mi
15154c7f4028SChris Mi out:
15160a9e2307SChris Mi esw_vport_tbl_put(esw);
15174c7f4028SChris Mi return PTR_ERR(fdb);
15184c7f4028SChris Mi }
15194c7f4028SChris Mi
1520ae430332SAriel Levkovich #define fdb_modify_header_fwd_to_table_supported(esw) \
1521ae430332SAriel Levkovich (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
esw_init_chains_offload_flags(struct mlx5_eswitch * esw,u32 * flags)1522ae430332SAriel Levkovich static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
1523ae430332SAriel Levkovich {
1524ae430332SAriel Levkovich struct mlx5_core_dev *dev = esw->dev;
1525ae430332SAriel Levkovich
1526ae430332SAriel Levkovich if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
1527ae430332SAriel Levkovich *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
1528ae430332SAriel Levkovich
1529ae430332SAriel Levkovich if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
1530ae430332SAriel Levkovich esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1531ae430332SAriel Levkovich *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1532ae430332SAriel Levkovich esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
1533ae430332SAriel Levkovich } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
1534ae430332SAriel Levkovich *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1535ae430332SAriel Levkovich esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
1536ae430332SAriel Levkovich } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
1537ae430332SAriel Levkovich /* Disabled when ttl workaround is needed, e.g
1538ae430332SAriel Levkovich * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
1539ae430332SAriel Levkovich */
1540ae430332SAriel Levkovich esw_warn(dev,
1541ae430332SAriel Levkovich "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
1542ae430332SAriel Levkovich *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1543ae430332SAriel Levkovich } else {
1544ae430332SAriel Levkovich *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1545ae430332SAriel Levkovich esw_info(dev, "Supported tc chains and prios offload\n");
1546ae430332SAriel Levkovich }
1547ae430332SAriel Levkovich
1548ae430332SAriel Levkovich if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1549ae430332SAriel Levkovich *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
1550ae430332SAriel Levkovich }
1551ae430332SAriel Levkovich
1552ae430332SAriel Levkovich static int
esw_chains_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * miss_fdb)1553ae430332SAriel Levkovich esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1554ae430332SAriel Levkovich {
1555ae430332SAriel Levkovich struct mlx5_core_dev *dev = esw->dev;
1556ae430332SAriel Levkovich struct mlx5_flow_table *nf_ft, *ft;
1557ae430332SAriel Levkovich struct mlx5_chains_attr attr = {};
1558ae430332SAriel Levkovich struct mlx5_fs_chains *chains;
1559ae430332SAriel Levkovich int err;
1560ae430332SAriel Levkovich
1561ae430332SAriel Levkovich esw_init_chains_offload_flags(esw, &attr.flags);
1562ae430332SAriel Levkovich attr.ns = MLX5_FLOW_NAMESPACE_FDB;
1563ae430332SAriel Levkovich attr.max_grp_num = esw->params.large_group_num;
1564ae430332SAriel Levkovich attr.default_ft = miss_fdb;
1565c9355682SChris Mi attr.mapping = esw->offloads.reg_c0_obj_pool;
1566ae430332SAriel Levkovich
1567ae430332SAriel Levkovich chains = mlx5_chains_create(dev, &attr);
1568ae430332SAriel Levkovich if (IS_ERR(chains)) {
1569ae430332SAriel Levkovich err = PTR_ERR(chains);
1570ae430332SAriel Levkovich esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
1571ae430332SAriel Levkovich return err;
1572ae430332SAriel Levkovich }
15738e80e564SPaul Blakey mlx5_chains_print_info(chains);
1574ae430332SAriel Levkovich
1575ae430332SAriel Levkovich esw->fdb_table.offloads.esw_chains_priv = chains;
1576ae430332SAriel Levkovich
1577ae430332SAriel Levkovich /* Create tc_end_ft which is the always created ft chain */
1578ae430332SAriel Levkovich nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
1579ae430332SAriel Levkovich 1, 0);
1580ae430332SAriel Levkovich if (IS_ERR(nf_ft)) {
1581ae430332SAriel Levkovich err = PTR_ERR(nf_ft);
1582ae430332SAriel Levkovich goto nf_ft_err;
1583ae430332SAriel Levkovich }
1584ae430332SAriel Levkovich
1585ae430332SAriel Levkovich /* Always open the root for fast path */
1586ae430332SAriel Levkovich ft = mlx5_chains_get_table(chains, 0, 1, 0);
1587ae430332SAriel Levkovich if (IS_ERR(ft)) {
1588ae430332SAriel Levkovich err = PTR_ERR(ft);
1589ae430332SAriel Levkovich goto level_0_err;
1590ae430332SAriel Levkovich }
1591ae430332SAriel Levkovich
1592ae430332SAriel Levkovich /* Open level 1 for split fdb rules now if prios isn't supported */
1593ae430332SAriel Levkovich if (!mlx5_chains_prios_supported(chains)) {
15940a9e2307SChris Mi err = esw_vport_tbl_get(esw);
1595ae430332SAriel Levkovich if (err)
1596ae430332SAriel Levkovich goto level_1_err;
1597ae430332SAriel Levkovich }
1598ae430332SAriel Levkovich
1599ae430332SAriel Levkovich mlx5_chains_set_end_ft(chains, nf_ft);
1600ae430332SAriel Levkovich
1601ae430332SAriel Levkovich return 0;
1602ae430332SAriel Levkovich
1603ae430332SAriel Levkovich level_1_err:
1604ae430332SAriel Levkovich mlx5_chains_put_table(chains, 0, 1, 0);
1605ae430332SAriel Levkovich level_0_err:
1606ae430332SAriel Levkovich mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1607ae430332SAriel Levkovich nf_ft_err:
1608ae430332SAriel Levkovich mlx5_chains_destroy(chains);
1609ae430332SAriel Levkovich esw->fdb_table.offloads.esw_chains_priv = NULL;
1610ae430332SAriel Levkovich
1611ae430332SAriel Levkovich return err;
1612ae430332SAriel Levkovich }
1613ae430332SAriel Levkovich
1614ae430332SAriel Levkovich static void
esw_chains_destroy(struct mlx5_eswitch * esw,struct mlx5_fs_chains * chains)1615ae430332SAriel Levkovich esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1616ae430332SAriel Levkovich {
1617ae430332SAriel Levkovich if (!mlx5_chains_prios_supported(chains))
16180a9e2307SChris Mi esw_vport_tbl_put(esw);
1619ae430332SAriel Levkovich mlx5_chains_put_table(chains, 0, 1, 0);
1620ae430332SAriel Levkovich mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1621ae430332SAriel Levkovich mlx5_chains_destroy(chains);
1622ae430332SAriel Levkovich }
1623ae430332SAriel Levkovich
1624ae430332SAriel Levkovich #else /* CONFIG_MLX5_CLS_ACT */
1625ae430332SAriel Levkovich
1626ae430332SAriel Levkovich static int
esw_chains_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * miss_fdb)1627ae430332SAriel Levkovich esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1628ae430332SAriel Levkovich { return 0; }
1629ae430332SAriel Levkovich
1630ae430332SAriel Levkovich static void
esw_chains_destroy(struct mlx5_eswitch * esw,struct mlx5_fs_chains * chains)1631ae430332SAriel Levkovich esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1632ae430332SAriel Levkovich {}
1633ae430332SAriel Levkovich
1634ae430332SAriel Levkovich #endif
1635ae430332SAriel Levkovich
16364a561817SRoi Dayan static int
esw_create_send_to_vport_group(struct mlx5_eswitch * esw,struct mlx5_flow_table * fdb,u32 * flow_group_in,int * ix)16374a561817SRoi Dayan esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
16384a561817SRoi Dayan struct mlx5_flow_table *fdb,
16394a561817SRoi Dayan u32 *flow_group_in,
16404a561817SRoi Dayan int *ix)
16414a561817SRoi Dayan {
16424a561817SRoi Dayan int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
16434a561817SRoi Dayan struct mlx5_flow_group *g;
16444a561817SRoi Dayan void *match_criteria;
16454a561817SRoi Dayan int count, err = 0;
16464a561817SRoi Dayan
16474a561817SRoi Dayan memset(flow_group_in, 0, inlen);
16484a561817SRoi Dayan
16497eb197fdSRoi Dayan mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
16504a561817SRoi Dayan
16514a561817SRoi Dayan match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
16524a561817SRoi Dayan MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
165329bcb6e4SRoi Dayan
165429bcb6e4SRoi Dayan if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
165529bcb6e4SRoi Dayan MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
16564a561817SRoi Dayan MLX5_SET_TO_ONES(fte_match_param, match_criteria,
16574a561817SRoi Dayan misc_parameters.source_eswitch_owner_vhca_id);
16584a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in,
16594a561817SRoi Dayan source_eswitch_owner_vhca_id_valid, 1);
16604a561817SRoi Dayan }
16614a561817SRoi Dayan
16624a561817SRoi Dayan /* See comment at table_size calculation */
16634a561817SRoi Dayan count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
16644a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
16654a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
16664a561817SRoi Dayan *ix += count;
16674a561817SRoi Dayan
16684a561817SRoi Dayan g = mlx5_create_flow_group(fdb, flow_group_in);
16694a561817SRoi Dayan if (IS_ERR(g)) {
16704a561817SRoi Dayan err = PTR_ERR(g);
16714a561817SRoi Dayan esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
16724a561817SRoi Dayan goto out;
16734a561817SRoi Dayan }
16744a561817SRoi Dayan esw->fdb_table.offloads.send_to_vport_grp = g;
16754a561817SRoi Dayan
16764a561817SRoi Dayan out:
16774a561817SRoi Dayan return err;
16784a561817SRoi Dayan }
16794a561817SRoi Dayan
16804a561817SRoi Dayan static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch * esw,struct mlx5_flow_table * fdb,u32 * flow_group_in,int * ix)16814a561817SRoi Dayan esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
16824a561817SRoi Dayan struct mlx5_flow_table *fdb,
16834a561817SRoi Dayan u32 *flow_group_in,
16844a561817SRoi Dayan int *ix)
16854a561817SRoi Dayan {
16864a561817SRoi Dayan int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
16874a561817SRoi Dayan struct mlx5_flow_group *g;
16884a561817SRoi Dayan void *match_criteria;
16894a561817SRoi Dayan int err = 0;
16904a561817SRoi Dayan
16914a561817SRoi Dayan if (!esw_src_port_rewrite_supported(esw))
16924a561817SRoi Dayan return 0;
16934a561817SRoi Dayan
16944a561817SRoi Dayan memset(flow_group_in, 0, inlen);
16954a561817SRoi Dayan
16964a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
16974a561817SRoi Dayan MLX5_MATCH_MISC_PARAMETERS_2);
16984a561817SRoi Dayan
16994a561817SRoi Dayan match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
17004a561817SRoi Dayan
17014a561817SRoi Dayan MLX5_SET(fte_match_param, match_criteria,
17024a561817SRoi Dayan misc_parameters_2.metadata_reg_c_0,
17034a561817SRoi Dayan mlx5_eswitch_get_vport_metadata_mask());
17044a561817SRoi Dayan MLX5_SET(fte_match_param, match_criteria,
17054a561817SRoi Dayan misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
17064a561817SRoi Dayan
17074a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
17084a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in,
1709430e2d5eSRoi Dayan end_flow_index, *ix + esw->total_vports - 1);
1710430e2d5eSRoi Dayan *ix += esw->total_vports;
17114a561817SRoi Dayan
17124a561817SRoi Dayan g = mlx5_create_flow_group(fdb, flow_group_in);
17134a561817SRoi Dayan if (IS_ERR(g)) {
17144a561817SRoi Dayan err = PTR_ERR(g);
17154a561817SRoi Dayan esw_warn(esw->dev,
17164a561817SRoi Dayan "Failed to create send-to-vport meta flow group err(%d)\n", err);
17174a561817SRoi Dayan goto send_vport_meta_err;
17184a561817SRoi Dayan }
17194a561817SRoi Dayan esw->fdb_table.offloads.send_to_vport_meta_grp = g;
17204a561817SRoi Dayan
17214a561817SRoi Dayan return 0;
17224a561817SRoi Dayan
17234a561817SRoi Dayan send_vport_meta_err:
17244a561817SRoi Dayan return err;
17254a561817SRoi Dayan }
17264a561817SRoi Dayan
17274a561817SRoi Dayan static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch * esw,struct mlx5_flow_table * fdb,u32 * flow_group_in,int * ix)17284a561817SRoi Dayan esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
17294a561817SRoi Dayan struct mlx5_flow_table *fdb,
17304a561817SRoi Dayan u32 *flow_group_in,
17314a561817SRoi Dayan int *ix)
17324a561817SRoi Dayan {
173318e31d42SShay Drory int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
17344a561817SRoi Dayan int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
17354a561817SRoi Dayan struct mlx5_flow_group *g;
17364a561817SRoi Dayan void *match_criteria;
17374a561817SRoi Dayan int err = 0;
17384a561817SRoi Dayan
17394a561817SRoi Dayan if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
17404a561817SRoi Dayan return 0;
17414a561817SRoi Dayan
17424a561817SRoi Dayan memset(flow_group_in, 0, inlen);
17434a561817SRoi Dayan
17447eb197fdSRoi Dayan mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
17454a561817SRoi Dayan
17464a561817SRoi Dayan if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
17474a561817SRoi Dayan match_criteria = MLX5_ADDR_OF(create_flow_group_in,
17484a561817SRoi Dayan flow_group_in,
17494a561817SRoi Dayan match_criteria);
17504a561817SRoi Dayan
17514a561817SRoi Dayan MLX5_SET_TO_ONES(fte_match_param, match_criteria,
17524a561817SRoi Dayan misc_parameters.source_eswitch_owner_vhca_id);
17534a561817SRoi Dayan
17544a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in,
17554a561817SRoi Dayan source_eswitch_owner_vhca_id_valid, 1);
17564a561817SRoi Dayan }
17574a561817SRoi Dayan
17584a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
17594a561817SRoi Dayan MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
176018e31d42SShay Drory *ix + max_peer_ports);
176118e31d42SShay Drory *ix += max_peer_ports + 1;
17624a561817SRoi Dayan
17634a561817SRoi Dayan g = mlx5_create_flow_group(fdb, flow_group_in);
17644a561817SRoi Dayan if (IS_ERR(g)) {
17654a561817SRoi Dayan err = PTR_ERR(g);
17664a561817SRoi Dayan esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
17674a561817SRoi Dayan goto out;
17684a561817SRoi Dayan }
17694a561817SRoi Dayan esw->fdb_table.offloads.peer_miss_grp = g;
17704a561817SRoi Dayan
17714a561817SRoi Dayan out:
17724a561817SRoi Dayan return err;
17734a561817SRoi Dayan }
17744a561817SRoi Dayan
/* Create the FDB "miss" flow group and install the default miss rules
 * (via esw_add_fdb_miss_rule()) for packets no other rule matched.
 *
 * The group matches on the outer destination MAC, with only the lowest bit
 * of dmac[0] in the mask — presumably the Ethernet multicast/IG bit, so
 * unicast and multicast misses land in separate entries (TODO confirm
 * against esw_add_fdb_miss_rule()).  It covers MLX5_ESW_MISS_FLOWS + 1
 * entries starting at *ix; note *ix is NOT advanced — this is the last
 * group in the table.
 *
 * Returns 0 on success or a negative errno; if installing the miss rules
 * fails, the group created here is destroyed before returning.
 */
static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;	/* mask only the first bit of the first dmac byte */

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	/* Populate the group with the actual miss rules. */
	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}
18204a561817SRoi Dayan
/* Build the offloads-mode slow-path FDB and everything layered on it:
 *
 *   1. Grab the FDB namespace and switch it to the configured steering mode.
 *   2. Create the slow-path FDB table (with tunnel encap/decap flags when
 *      encap is enabled) sized for send-to-vport, peer-miss and miss groups.
 *   3. Create the empty TC-miss table and open the FDB chains on top of it.
 *   4. Create the four flow groups in order: send-to-vport, meta
 *      send-to-vport, peer-eswitch miss, and the final miss group (which
 *      also installs the miss rules).
 *
 * "ix" is the next free flow index in the slow-path table; each group
 * helper advances it past its own range.  On any failure everything
 * created so far is unwound in reverse order via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	/* Scratch buffer reused by every group-create helper below. */
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If it needs to be changed in the future note
	 * the peer miss group should also be created based on the number of
	 * total vports of the peer (currently is also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* Lay the flow groups out back-to-back; each helper advances ix. */
	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

	/* Error unwind: reverse creation order; the meta and peer-miss groups
	 * are conditionally created, so guard their destruction the same way.
	 */
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
194469697b6eSOr Gerlitz
/* Tear down everything esw_create_offloads_fdb_tables() set up, in reverse
 * creation order: miss rules, flow groups, chains, TC-miss table, slow-path
 * FDB, and finally restore the namespace steering mode.
 *
 * A missing slow-path FDB means the tables were never created (or were
 * already destroyed), so this is a no-op in that case.
 */
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_get_slow_fdb(esw))
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	/* The meta and peer-miss groups are only created conditionally, so
	 * mirror those conditions here before destroying them.
	 */
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	/* NOTE(review): resets the eswitch user refcount to zero — presumably
	 * no users can remain once the FDB is gone; confirm against callers.
	 */
	atomic64_set(&esw->user_count, 0);
}
1969c116c6eeSOr Gerlitz
esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch * esw)19708ea7bcf6SJianbo Liu static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
19714f4edcc2SAriel Levkovich {
19724f4edcc2SAriel Levkovich int nvports;
19734f4edcc2SAriel Levkovich
19744f4edcc2SAriel Levkovich nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
19754f4edcc2SAriel Levkovich if (mlx5e_tc_int_port_supported(esw))
19764f4edcc2SAriel Levkovich nvports += MLX5E_TC_MAX_INT_PORT_NUM;
19774f4edcc2SAriel Levkovich
19784f4edcc2SAriel Levkovich return nvports;
19794f4edcc2SAriel Levkovich }
19804f4edcc2SAriel Levkovich
esw_create_offloads_table(struct mlx5_eswitch * esw)19818d6bd3c3SParav Pandit static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1982c116c6eeSOr Gerlitz {
1983b3ba5149SErez Shitrit struct mlx5_flow_table_attr ft_attr = {};
1984c116c6eeSOr Gerlitz struct mlx5_core_dev *dev = esw->dev;
1985b3ba5149SErez Shitrit struct mlx5_flow_table *ft_offloads;
1986b3ba5149SErez Shitrit struct mlx5_flow_namespace *ns;
1987c116c6eeSOr Gerlitz int err = 0;
1988c116c6eeSOr Gerlitz
1989c116c6eeSOr Gerlitz ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1990c116c6eeSOr Gerlitz if (!ns) {
1991c116c6eeSOr Gerlitz esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1992eff596daSOr Gerlitz return -EOPNOTSUPP;
1993c116c6eeSOr Gerlitz }
1994c116c6eeSOr Gerlitz
19958ea7bcf6SJianbo Liu ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
19968ea7bcf6SJianbo Liu MLX5_ESW_FT_OFFLOADS_DROP_RULE;
199711b717d6SPaul Blakey ft_attr.prio = 1;
1998b3ba5149SErez Shitrit
1999b3ba5149SErez Shitrit ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
2000c116c6eeSOr Gerlitz if (IS_ERR(ft_offloads)) {
2001c116c6eeSOr Gerlitz err = PTR_ERR(ft_offloads);
2002c116c6eeSOr Gerlitz esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
2003c116c6eeSOr Gerlitz return err;
2004c116c6eeSOr Gerlitz }
2005c116c6eeSOr Gerlitz
2006c116c6eeSOr Gerlitz esw->offloads.ft_offloads = ft_offloads;
2007c116c6eeSOr Gerlitz return 0;
2008c116c6eeSOr Gerlitz }
2009c116c6eeSOr Gerlitz
esw_destroy_offloads_table(struct mlx5_eswitch * esw)2010c116c6eeSOr Gerlitz static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
2011c116c6eeSOr Gerlitz {
2012c116c6eeSOr Gerlitz struct mlx5_esw_offload *offloads = &esw->offloads;
2013c116c6eeSOr Gerlitz
2014c116c6eeSOr Gerlitz mlx5_destroy_flow_table(offloads->ft_offloads);
2015c116c6eeSOr Gerlitz }
2016fed9ce22SOr Gerlitz
esw_create_vport_rx_group(struct mlx5_eswitch * esw)20178d6bd3c3SParav Pandit static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
2018fed9ce22SOr Gerlitz {
2019fed9ce22SOr Gerlitz int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2020fed9ce22SOr Gerlitz struct mlx5_flow_group *g;
2021fed9ce22SOr Gerlitz u32 *flow_group_in;
20228d6bd3c3SParav Pandit int nvports;
2023fed9ce22SOr Gerlitz int err = 0;
2024fed9ce22SOr Gerlitz
20258ea7bcf6SJianbo Liu nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
20261b9a07eeSLeon Romanovsky flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2027fed9ce22SOr Gerlitz if (!flow_group_in)
2028fed9ce22SOr Gerlitz return -ENOMEM;
2029fed9ce22SOr Gerlitz
20307eb197fdSRoi Dayan mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
2031fed9ce22SOr Gerlitz
2032fed9ce22SOr Gerlitz MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2033fed9ce22SOr Gerlitz MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
2034fed9ce22SOr Gerlitz
2035fed9ce22SOr Gerlitz g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2036fed9ce22SOr Gerlitz
2037fed9ce22SOr Gerlitz if (IS_ERR(g)) {
2038fed9ce22SOr Gerlitz err = PTR_ERR(g);
2039fed9ce22SOr Gerlitz mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
2040fed9ce22SOr Gerlitz goto out;
2041fed9ce22SOr Gerlitz }
2042fed9ce22SOr Gerlitz
2043fed9ce22SOr Gerlitz esw->offloads.vport_rx_group = g;
2044fed9ce22SOr Gerlitz out:
2045e574978aSChristophe JAILLET kvfree(flow_group_in);
2046fed9ce22SOr Gerlitz return err;
2047fed9ce22SOr Gerlitz }
2048fed9ce22SOr Gerlitz
esw_destroy_vport_rx_group(struct mlx5_eswitch * esw)2049fed9ce22SOr Gerlitz static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
2050fed9ce22SOr Gerlitz {
2051fed9ce22SOr Gerlitz mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
2052fed9ce22SOr Gerlitz }
2053fed9ce22SOr Gerlitz
/* Flow index of the RX drop rule within ft_offloads.
 *
 * The table is sized with MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) extra entry
 * for the drop rule at its very end, so the index equals the total number
 * of steering source ports (vports + miss flows + internal ports).
 */
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}
20628ea7bcf6SJianbo Liu
esw_create_vport_rx_drop_group(struct mlx5_eswitch * esw)20638ea7bcf6SJianbo Liu static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
20648ea7bcf6SJianbo Liu {
20658ea7bcf6SJianbo Liu int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
20668ea7bcf6SJianbo Liu struct mlx5_flow_group *g;
20678ea7bcf6SJianbo Liu u32 *flow_group_in;
20688ea7bcf6SJianbo Liu int flow_index;
20698ea7bcf6SJianbo Liu int err = 0;
20708ea7bcf6SJianbo Liu
20718ea7bcf6SJianbo Liu flow_index = esw_create_vport_rx_drop_rule_index(esw);
20728ea7bcf6SJianbo Liu
20738ea7bcf6SJianbo Liu flow_group_in = kvzalloc(inlen, GFP_KERNEL);
20748ea7bcf6SJianbo Liu if (!flow_group_in)
20758ea7bcf6SJianbo Liu return -ENOMEM;
20768ea7bcf6SJianbo Liu
20778ea7bcf6SJianbo Liu MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
20788ea7bcf6SJianbo Liu MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
20798ea7bcf6SJianbo Liu
20808ea7bcf6SJianbo Liu g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
20818ea7bcf6SJianbo Liu
20828ea7bcf6SJianbo Liu if (IS_ERR(g)) {
20838ea7bcf6SJianbo Liu err = PTR_ERR(g);
20848ea7bcf6SJianbo Liu mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
20858ea7bcf6SJianbo Liu goto out;
20868ea7bcf6SJianbo Liu }
20878ea7bcf6SJianbo Liu
20888ea7bcf6SJianbo Liu esw->offloads.vport_rx_drop_group = g;
20898ea7bcf6SJianbo Liu out:
20908ea7bcf6SJianbo Liu kvfree(flow_group_in);
20918ea7bcf6SJianbo Liu return err;
20928ea7bcf6SJianbo Liu }
20938ea7bcf6SJianbo Liu
esw_destroy_vport_rx_drop_group(struct mlx5_eswitch * esw)20948ea7bcf6SJianbo Liu static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
20958ea7bcf6SJianbo Liu {
20968ea7bcf6SJianbo Liu if (esw->offloads.vport_rx_drop_group)
20978ea7bcf6SJianbo Liu mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
20988ea7bcf6SJianbo Liu }
20998ea7bcf6SJianbo Liu
/* Fill @spec with a match on the traffic's source vport, using whichever
 * encoding the eswitch is configured for:
 *
 * - vport match metadata enabled: the source vport is carried in metadata
 *   register C0, so match misc_parameters_2.metadata_reg_c_0 against the
 *   per-vport metadata value under the vport metadata mask;
 * - otherwise: match the plain misc_parameters.source_port field.
 *
 * Sets both the match value and the match criteria, and updates
 * spec->match_criteria_enable to the corresponding parameter set.
 */
void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
			      u16 vport,
			      struct mlx5_flow_spec *spec)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}
}
21277eb197fdSRoi Dayan
21287eb197fdSRoi Dayan struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch * esw,u16 vport,struct mlx5_flow_destination * dest)21297eb197fdSRoi Dayan mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
21307eb197fdSRoi Dayan struct mlx5_flow_destination *dest)
21317eb197fdSRoi Dayan {
21327eb197fdSRoi Dayan struct mlx5_flow_act flow_act = {0};
21337eb197fdSRoi Dayan struct mlx5_flow_handle *flow_rule;
21347eb197fdSRoi Dayan struct mlx5_flow_spec *spec;
21357eb197fdSRoi Dayan
21367eb197fdSRoi Dayan spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
21377eb197fdSRoi Dayan if (!spec) {
21387eb197fdSRoi Dayan flow_rule = ERR_PTR(-ENOMEM);
21397eb197fdSRoi Dayan goto out;
21407eb197fdSRoi Dayan }
21417eb197fdSRoi Dayan
21427eb197fdSRoi Dayan mlx5_esw_set_spec_source_port(esw, vport, spec);
2143fed9ce22SOr Gerlitz
214466958ed9SHadar Hen Zion flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
214574491de9SMark Bloch flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
2146c966f7d5SGavi Teitz &flow_act, dest, 1);
2147fed9ce22SOr Gerlitz if (IS_ERR(flow_rule)) {
2148fed9ce22SOr Gerlitz esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
2149fed9ce22SOr Gerlitz goto out;
2150fed9ce22SOr Gerlitz }
2151fed9ce22SOr Gerlitz
2152fed9ce22SOr Gerlitz out:
2153c5bb1730SMaor Gottlieb kvfree(spec);
2154fed9ce22SOr Gerlitz return flow_rule;
2155fed9ce22SOr Gerlitz }
2156feae9087SOr Gerlitz
esw_create_vport_rx_drop_rule(struct mlx5_eswitch * esw)21578ea7bcf6SJianbo Liu static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
21588ea7bcf6SJianbo Liu {
21598ea7bcf6SJianbo Liu struct mlx5_flow_act flow_act = {};
21608ea7bcf6SJianbo Liu struct mlx5_flow_handle *flow_rule;
21618ea7bcf6SJianbo Liu
21628ea7bcf6SJianbo Liu flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
21638ea7bcf6SJianbo Liu flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
21648ea7bcf6SJianbo Liu &flow_act, NULL, 0);
21658ea7bcf6SJianbo Liu if (IS_ERR(flow_rule)) {
21668ea7bcf6SJianbo Liu esw_warn(esw->dev,
21678ea7bcf6SJianbo Liu "fs offloads: Failed to add vport rx drop rule err %ld\n",
21688ea7bcf6SJianbo Liu PTR_ERR(flow_rule));
21698ea7bcf6SJianbo Liu return PTR_ERR(flow_rule);
21708ea7bcf6SJianbo Liu }
21718ea7bcf6SJianbo Liu
21728ea7bcf6SJianbo Liu esw->offloads.vport_rx_drop_rule = flow_rule;
21738ea7bcf6SJianbo Liu
21748ea7bcf6SJianbo Liu return 0;
21758ea7bcf6SJianbo Liu }
21768ea7bcf6SJianbo Liu
esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch * esw)21778ea7bcf6SJianbo Liu static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
21788ea7bcf6SJianbo Liu {
21798ea7bcf6SJianbo Liu if (esw->offloads.vport_rx_drop_rule)
21808ea7bcf6SJianbo Liu mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
21818ea7bcf6SJianbo Liu }
21828ea7bcf6SJianbo Liu
/* Report the effective WQE min-inline mode of the eswitch in *mode.
 *
 * Requires the vport_group_manager capability and a created FDB
 * (-EOPNOTSUPP otherwise).  If the device-wide wqe_inline_mode capability
 * pins the mode (NOT_REQUIRED -> NONE, or L2) that value is returned
 * directly.  In VPORT_CONTEXT mode every host-function vport is queried
 * and all must report the same min-inline mode; a mismatch returns -EINVAL.
 */
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}
	/* Any other capability value also falls through to the vport query. */

query_vports:
	/* Use the first host vport as the baseline, then require every other
	 * host-function vport to agree with it.
	 */
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
2220cc617cedSParav Pandit
/* Free the restore-table resources created by esw_create_restore_table(),
 * in reverse creation order: the copy modify-header, the flow group, then
 * the table itself.  A no-op when reg_c1 loopback is unsupported, since
 * nothing was created in that case.
 */
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
223211b717d6SPaul Blakey
/* Create the "restore" table used to recover per-packet metadata:
 *
 *   - a flow table in the OFFLOADS namespace with one entry per possible
 *     reg_c_0 user-data value (1 << ESW_REG_C0_USER_DATA_METADATA_BITS);
 *   - a flow group spanning the whole table, matching reg_c_0 under
 *     ESW_REG_C0_USER_DATA_METADATA_MASK;
 *   - a modify-header action that copies metadata register C1 into
 *     register B.
 *
 * All three are stored in esw->offloads.  Returns 0 on success (including
 * the no-op case where reg_c1 loopback is unsupported) or a negative errno,
 * unwinding whatever was created.
 */
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	/* One entry per possible user-data value carried in reg_c_0. */
	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	/* Single group over the whole table, matching the user-data bits of
	 * metadata register C0.
	 */
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	/* Modify-header action: copy metadata register C1 into register B. */
	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
232311b717d6SPaul Blakey
/* Switch the eswitch into offloads (switchdev) mode.
 *
 * Sets the mode, then re-enables the eswitch with the currently probed
 * number of VFs. On enable failure the mode is rolled back to legacy and
 * the device's interfaces are rescanned so dependent drivers rebind.
 *
 * Returns 0 on success or a negative errno; @extack carries a
 * human-readable reason for userspace.
 */
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_OFFLOADS;
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw->mode = MLX5_ESWITCH_LEGACY;
		mlx5_rescan_drivers(esw->dev);
		return err;
	}
	/* If no explicit inline mode was configured, query the vports for a
	 * common one; fall back to L2 (and warn) when they disagree.
	 */
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return 0;
}
2348c930a3adSOr Gerlitz
/* Allocate a representor for @vport and insert it into the offloads
 * vport_reps xarray, keyed by vport number. Every rep type starts in
 * the REP_UNREGISTERED state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the xarray
 * insertion errno (the rep is freed on that path).
 */
static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	return 0;

insert_err:
	kfree(rep);
	return err;
}
237447dd7e60SParav Pandit
/* Remove @rep from the vport_reps xarray and free it. */
static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}
238147dd7e60SParav Pandit
/* Free every representor and destroy the vport_reps xarray. */
static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}
2391e8d31c4dSMark Bloch
/* Create a representor for each existing vport. On any failure the
 * reps created so far are torn down again before returning the error.
 */
static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}
2411e8d31c4dSMark Bloch
/* devlink "esw_port_metadata" parameter set callback.
 *
 * Toggles source-vport match metadata. Rejected with -EBUSY while the
 * offloads FDB exists (the setting is baked into the steering tables at
 * creation time) and with -EOPNOTSUPP when the device cannot match on
 * vport metadata at all. Serialized against mode changes via mode_lock.
 */
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (ctx->val.vbool)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}
2436d2a651efSJiri Pirko
/* devlink "esw_port_metadata" parameter get callback: report whether
 * source-vport match metadata is currently enabled on the eswitch.
 */
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(esw);
	return 0;
}
2445d2a651efSJiri Pirko
/* devlink "esw_port_metadata" parameter validate callback.
 *
 * The parameter may only change while the eswitch is not in switchdev
 * (offloads) mode; otherwise reject with -EBUSY.
 *
 * Fix: the extack message was ungrammatical ("must either disabled or
 * non switchdev mode"); reworded without changing semantics.
 */
static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
				      union devlink_param_value val,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 esw_mode;

	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must either be disabled or in non switchdev mode");
		return -EBUSY;
	}
	return 0;
}
2461d2a651efSJiri Pirko
/* Runtime devlink parameters exposed by the eswitch. */
static const struct devlink_param esw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     esw_port_metadata_get,
			     esw_port_metadata_set,
			     esw_port_metadata_validate),
};
2470d2a651efSJiri Pirko
/* One-time offloads initialization: build the representors, choose the
 * default for vport match metadata (enabled when we are the eswitch
 * manager and the device supports it), then register the eswitch
 * devlink parameters. On param registration failure the reps are freed.
 */
int esw_offloads_init(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_init_reps(esw);
	if (err)
		return err;

	if (MLX5_ESWITCH_MANAGER(esw->dev) &&
	    mlx5_esw_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	err = devl_params_register(priv_to_devlink(esw->dev),
				   esw_devlink_params,
				   ARRAY_SIZE(esw_devlink_params));
	if (err)
		goto err_params;

	return 0;

err_params:
	esw_offloads_cleanup_reps(esw);
	return err;
}
2495d2a651efSJiri Pirko
/* Undo esw_offloads_init(): unregister the devlink parameters first,
 * then free the representors.
 */
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	devl_params_unregister(priv_to_devlink(esw->dev),
			       esw_devlink_params,
			       ARRAY_SIZE(esw_devlink_params));
	esw_offloads_cleanup_reps(esw);
}
2503d2a651efSJiri Pirko
/* Unload one rep type of @rep if (and only if) it is currently loaded.
 * The LOADED -> REGISTERED transition is made with cmpxchg so exactly
 * one caller performs the actual ->unload().
 */
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}
2511c9b99abcSBodong Wang
/* Unload the given rep type for every representor on the eswitch. */
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
25206ed1803aSMark Bloch
/* Load all rep types for @vport_num. Each type is moved
 * REP_REGISTERED -> REP_LOADED with cmpxchg before its ->load() op is
 * invoked, so unregistered types are skipped. On failure the failed
 * type is reset to REP_REGISTERED and all previously loaded types are
 * unloaded in reverse order.
 */
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}
25446ed1803aSMark Bloch
/* Unload all rep types for @vport_num, in reverse type order. */
static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
2554c2d7712cSBodong Wang
mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch * esw,struct mlx5_vport * vport)25552caa2a39SJiri Pirko int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2556d9833bcfSJiri Pirko {
2557d9833bcfSJiri Pirko if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2558d9833bcfSJiri Pirko return 0;
2559d9833bcfSJiri Pirko
25602caa2a39SJiri Pirko return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
2561d9833bcfSJiri Pirko }
2562d9833bcfSJiri Pirko
mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch * esw,struct mlx5_vport * vport)25632caa2a39SJiri Pirko void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2564d9833bcfSJiri Pirko {
2565d9833bcfSJiri Pirko if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2566d9833bcfSJiri Pirko return;
2567d9833bcfSJiri Pirko
25682caa2a39SJiri Pirko mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
2569d9833bcfSJiri Pirko }
2570d9833bcfSJiri Pirko
/* Initialize the devlink port of an SF (sub-function) representor.
 * Unlike the PF/VF variant, this is done unconditionally (no eswitch
 * mode check here).
 */
int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum)
{
	return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
}
2577e855afd7SJiri Pirko
/* Tear down the devlink port of an SF (sub-function) representor. */
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
}
2582e855afd7SJiri Pirko
/* Register the vport's devlink port and then load its representors.
 * A no-op outside offloads mode. On rep load failure the devlink port
 * is unregistered again.
 */
int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	err = mlx5_esw_offloads_devlink_port_register(esw, vport);
	if (err)
		return err;

	err = mlx5_esw_offloads_rep_load(esw, vport->vport);
	if (err)
		goto load_err;
	return err;

load_err:
	mlx5_esw_offloads_devlink_port_unregister(esw, vport);
	return err;
}
260338679b5aSParav Pandit
/* Reverse of mlx5_esw_offloads_load_rep(): unload the representors and
 * then unregister the vport's devlink port. A no-op outside offloads
 * mode.
 */
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport->vport);

	mlx5_esw_offloads_devlink_port_unregister(esw, vport);
}
261338679b5aSParav Pandit
/* Point @slave's FDB root at @master's root flow table (shared FDB),
 * or restore the slave's own root when @master is NULL.
 *
 * The referenced root namespace's chain_lock is held across the
 * SET_FLOW_TABLE_ROOT command so the root table cannot change
 * underneath the firmware call. The command is always executed on the
 * slave device.
 */
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		/* Cross-vhca root: reference the master's table by its
		 * eswitch owner vhca id.
		 */
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}
2654db202995SMark Bloch
/* Add a bounce rule on @acl (the master manager-vport egress ACL) that
 * forwards traffic arriving from @slave's uplink — matched by source
 * port MLX5_VPORT_UPLINK plus the slave's eswitch owner vhca id — to
 * the slave's manager vport.
 *
 * The rule is tracked in @vport's bounce_rules xarray keyed by the
 * slave's vhca id; if xa_insert() fails the rule is deleted again.
 */
static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
	} else {
		err = xa_insert(&vport->egress.offloads.bounce_rules,
				slave_index, flow_rule, GFP_KERNEL);
		if (err)
			mlx5_del_flow_rules(flow_rule);
	}

	kvfree(spec);
	return err;
}
2703db202995SMark Bloch
/* Create (once) the manager-vport egress ACL table and the flow group
 * used for shared-FDB bounce rules, sized for @count slaves. The group
 * matches on source port + source eswitch owner vhca id. Returns 0 if
 * the ACL already exists.
 */
static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
					      struct mlx5_flow_namespace *egress_ns,
					      struct mlx5_vport *vport, size_t count)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = count, .prio = 0, .level = 0,
	};
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	/* Resources are shared by all slaves; only the first caller builds them. */
	if (vport->egress.acl)
		return 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* Non-vport-0 (or ECPF) ACLs are created on behalf of another vport. */
	if (vport->vport || mlx5_core_is_ecpf(esw->dev))
		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* NOTE(review): end_flow_index is set to count, while the restore
	 * group above uses max_fte - 1 — confirm the extra slot is intended.
	 */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;
	vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
	xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);

	kvfree(flow_group_in);

	return 0;

err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}
2769db202995SMark Bloch
/* Drop the bounce-rule flow group and the egress ACL table, but only
 * once the last bounce rule is gone; earlier callers leave them in
 * place for the remaining slaves.
 */
static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
{
	if (!xa_empty(&vport->egress.offloads.bounce_rules))
		return;
	mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
	vport->egress.offloads.bounce_grp = NULL;
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.acl = NULL;
}
27795e0202ebSShay Drory
/* Install the egress bounce rule steering @slave's traffic out through
 * @master's manager vport, creating the shared ACL resources (sized
 * for @count slaves) on first use.
 *
 * Returns 0 if the manager vport already owns an egress ACL of a
 * different (non shared-FDB) type; -EINVAL if a rule for this slave's
 * vhca id already exists. On rule-install failure the shared resources
 * are destroyed again if no other rules remain.
 */
static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave, size_t count)
{
	struct mlx5_eswitch *esw = master->priv.eswitch;
	u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_acl_namespace(master,
						      MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						      vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
		return 0;

	err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
	if (err)
		return err;

	if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
		return -EINVAL;

	err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
	if (err)
		goto err_rule;

	return 0;

err_rule:
	esw_master_egress_destroy_resources(vport);
	return err;
}
28195e0202ebSShay Drory
/* Remove @slave_dev's bounce rule (keyed by its vhca id) from the
 * master's manager vport and, when it was the last one, tear down the
 * whole egress ACL and the bounce_rules xarray.
 */
static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
					 struct mlx5_core_dev *slave_dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));

	if (xa_empty(&vport->egress.offloads.bounce_rules)) {
		esw_acl_egress_ofld_cleanup(vport);
		xa_destroy(&vport->egress.offloads.bounce_rules);
	}
}
2835db202995SMark Bloch
/* Join @slave_esw to @master_esw's shared FDB: re-root the slave's FDB
 * onto the master's root table and add the master-side egress bounce
 * rule, sized for @max_slaves. On egress-rule failure the slave's own
 * FDB root is restored.
 */
int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev, max_slaves);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
	return err;
}
2857db202995SMark Bloch
/* Detach @slave_esw from the shared FDB: restore its own FDB root
 * first, then remove the master-side egress bounce rule.
 */
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
	esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
}
2864db202995SMark Bloch
/* Event codes used on the devcom channel to pair/unpair eswitches. */
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
2867ac004b83SRoi Dayan
/* Deliver an UNPAIR event to every loaded rep type of every rep so rep
 * owners can drop state tied to @peer_esw. Event return values are
 * ignored on this teardown path; types without an ->event op are
 * skipped.
 */
static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
		}
	}
}
2886ac004b83SRoi Dayan
/* Tear down pairing with @peer_esw: flush the peer's offloaded TC
 * flows (when TC offload is compiled in), notify the reps, then delete
 * the FDB peer miss rules.
 */
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
				     struct mlx5_eswitch *peer_esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
	esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
}
2896ac004b83SRoi Dayan
/* Pair with @peer_esw: install the FDB peer miss rules, then deliver a
 * PAIR event to every loaded rep type that implements ->event. Any
 * event failure unwinds by running the full unpair path.
 */
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw, peer_esw);
	return err;
}
2928c8e6a9e6SMark Bloch
/* Link (@pair == true) or unlink (@pair == false) the FDB root flow
 * namespaces of @esw and @peer_esw in both directions, keyed by each
 * side's vhca_id.  Returns 0 or a negative errno; unlinking ignores
 * the helper's return values, matching the rollback path.
 */
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	ns = esw->dev->priv.steering->fdb_root_ns;
	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;

	if (!pair) {
		/* Teardown: detach both directions (errors ignored). */
		mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
		mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
		return 0;
	}

	err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
	if (err)
		return err;

	err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
	if (err)
		/* Roll back the first direction on failure. */
		mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);

	return err;
}
29598463daf1SMaor Gottlieb
/* Devcom event handler for the ESW_OFFLOADS component.
 *
 * @my_data is this eswitch, @event_data the peer eswitch the event
 * concerns.  PAIR wires up both directions (namespace peering, miss
 * rules + rep events, "paired" xarray bookkeeping) and UNPAIR tears it
 * all down in reverse order.  Returns 0 or a negative errno.
 */
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	u16 esw_i, peer_esw_i;
	bool esw_paired;
	int err;

	/* Each side's vhca_id is the key in the other's "paired" xarray. */
	peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
	esw_paired = !!xa_load(&esw->paired, peer_esw_i);

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		/* Both sides must agree on metadata matching; otherwise
		 * the pairing is silently skipped.
		 */
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		/* Already paired with this peer - nothing to do. */
		if (esw_paired)
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
		if (err)
			goto err_xa;

		err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
		if (err)
			goto err_peer_xa;

		esw->num_peers++;
		peer_esw->num_peers++;
		mlx5_devcom_comp_set_ready(esw->devcom, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!esw_paired)
			break;

		/* Drop readiness only when the last peer on both sides
		 * is gone, then unwind in reverse of the PAIR order.
		 */
		peer_esw->num_peers--;
		esw->num_peers--;
		if (!esw->num_peers && !peer_esw->num_peers)
			mlx5_devcom_comp_set_ready(esw->devcom, false);
		xa_erase(&peer_esw->paired, esw_i);
		xa_erase(&esw->paired, peer_esw_i);
		mlx5_esw_offloads_unpair(peer_esw, esw);
		mlx5_esw_offloads_unpair(esw, peer_esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

	/* Error unwinding mirrors the setup sequence, newest step first. */
err_peer_xa:
	xa_erase(&esw->paired, peer_esw_i);
err_xa:
	mlx5_esw_offloads_unpair(peer_esw, esw);
err_pair:
	mlx5_esw_offloads_unpair(esw, peer_esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
3039ac004b83SRoi Dayan
/* Initialize peer-flow bookkeeping and, when the device supports merged
 * eswitch pairing, register on the ESW_OFFLOADS devcom component (keyed
 * by @key) and announce this eswitch to potential peers.
 */
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
	mutex_init(&esw->offloads.peer_mutex);

	/* Pairing only makes sense on merged-eswitch capable devices. */
	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
	    !mlx5_lag_is_supported(esw->dev))
		return;

	xa_init(&esw->paired);
	esw->num_peers = 0;
	esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
						     MLX5_DEVCOM_ESW_OFFLOADS,
						     key,
						     mlx5_esw_offloads_devcom_event,
						     esw);
	if (IS_ERR_OR_NULL(esw->devcom))
		return;

	/* Send PAIR with UNPAIR as the companion event code.
	 * NOTE(review): exact semantics of the second/third arguments
	 * (event vs. rollback event) assumed from naming - confirm
	 * against mlx5_devcom_send_event().
	 */
	mlx5_devcom_send_event(esw->devcom,
			       ESW_OFFLOADS_DEVCOM_PAIR,
			       ESW_OFFLOADS_DEVCOM_UNPAIR,
			       esw);
}
3070ac004b83SRoi Dayan
/* Undo mlx5_esw_offloads_devcom_init(): unpair from any peers, drop the
 * devcom registration and release the "paired" xarray.
 */
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	/* Nothing to undo if registration was skipped or failed. */
	if (IS_ERR_OR_NULL(esw->devcom))
		return;

	/* Unpair before unregistering so peers see the teardown. */
	mlx5_devcom_send_event(esw->devcom,
			       ESW_OFFLOADS_DEVCOM_UNPAIR,
			       ESW_OFFLOADS_DEVCOM_UNPAIR,
			       esw);

	mlx5_devcom_unregister_component(esw->devcom);
	xa_destroy(&esw->paired);
	esw->devcom = NULL;
}
308588d162b4SRoi Dayan
/* Report whether the ESW_OFFLOADS devcom component is in the "ready"
 * state (set by the event handler once a pairing succeeds).
 */
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
{
	return mlx5_devcom_comp_is_ready(esw->devcom);
}
3090ac004b83SRoi Dayan
mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch * esw)30917bf481d7SParav Pandit bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
309292ab1eb3SJianbo Liu {
309392ab1eb3SJianbo Liu if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
309492ab1eb3SJianbo Liu return false;
309592ab1eb3SJianbo Liu
309692ab1eb3SJianbo Liu if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
309792ab1eb3SJianbo Liu MLX5_FDB_TO_VPORT_REG_C_0))
309892ab1eb3SJianbo Liu return false;
309992ab1eb3SJianbo Liu
310092ab1eb3SJianbo Liu return true;
310192ab1eb3SJianbo Liu }
310292ab1eb3SJianbo Liu
/* Metadata value reserved for uplink vports; never handed out by the
 * ida-based allocator below.
 */
#define MLX5_ESW_METADATA_RSVD_UPLINK 1

/* Share the same metadata for uplink's. This is fine because:
 * (a) In shared FDB mode (LAG) both uplink's are treated the
 * same and tagged with the same metadata.
 * (b) In non shared FDB mode, packets from physical port0
 * cannot hit eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
	return MLX5_ESW_METADATA_RSVD_UPLINK;
}
31150b0ea3c5SSunil Rani
mlx5_esw_match_metadata_alloc(struct mlx5_eswitch * esw)3116133dcfc5SVu Pham u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
3117133dcfc5SVu Pham {
31187cd7becdSsunils u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
31194f4edcc2SAriel Levkovich /* Reserve 0xf for internal port offload */
31204f4edcc2SAriel Levkovich u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
31217cd7becdSsunils u32 pf_num;
3122133dcfc5SVu Pham int id;
3123133dcfc5SVu Pham
31247cd7becdSsunils /* Only 4 bits of pf_num */
31252ec16dddSRongwei Liu pf_num = mlx5_get_dev_index(esw->dev);
31267cd7becdSsunils if (pf_num > max_pf_num)
31277cd7becdSsunils return 0;
3128133dcfc5SVu Pham
31297cd7becdSsunils /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
31300b0ea3c5SSunil Rani /* Use only non-zero vport_id (2-4095) for all PF's */
31310b0ea3c5SSunil Rani id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
31320b0ea3c5SSunil Rani MLX5_ESW_METADATA_RSVD_UPLINK + 1,
31330b0ea3c5SSunil Rani vport_end_ida, GFP_KERNEL);
31347cd7becdSsunils if (id < 0)
31357cd7becdSsunils return 0;
31367cd7becdSsunils id = (pf_num << ESW_VPORT_BITS) | id;
31377cd7becdSsunils return id;
3138133dcfc5SVu Pham }
3139133dcfc5SVu Pham
/* Return a value obtained from mlx5_esw_match_metadata_alloc() to the
 * ida.  The PF-number bits are masked off first, since only the low
 * ESW_VPORT_BITS came from the ida.
 */
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
3147133dcfc5SVu Pham
esw_offloads_vport_metadata_setup(struct mlx5_eswitch * esw,struct mlx5_vport * vport)3148133dcfc5SVu Pham static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
3149133dcfc5SVu Pham struct mlx5_vport *vport)
3150133dcfc5SVu Pham {
31510b0ea3c5SSunil Rani if (vport->vport == MLX5_VPORT_UPLINK)
31520b0ea3c5SSunil Rani vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
31530b0ea3c5SSunil Rani else
3154133dcfc5SVu Pham vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
31550b0ea3c5SSunil Rani
3156133dcfc5SVu Pham vport->metadata = vport->default_metadata;
3157133dcfc5SVu Pham return vport->metadata ? 0 : -ENOSPC;
3158133dcfc5SVu Pham }
3159133dcfc5SVu Pham
esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch * esw,struct mlx5_vport * vport)3160133dcfc5SVu Pham static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3161133dcfc5SVu Pham struct mlx5_vport *vport)
3162133dcfc5SVu Pham {
3163406493a5SVu Pham if (!vport->default_metadata)
3164133dcfc5SVu Pham return;
3165133dcfc5SVu Pham
31660b0ea3c5SSunil Rani if (vport->vport == MLX5_VPORT_UPLINK)
31670b0ea3c5SSunil Rani return;
31680b0ea3c5SSunil Rani
3169133dcfc5SVu Pham WARN_ON(vport->metadata != vport->default_metadata);
3170133dcfc5SVu Pham mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3171133dcfc5SVu Pham }
3172133dcfc5SVu Pham
/* Free the match metadata of every vport.  No-op unless metadata
 * matching is enabled on this eswitch.
 */
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}
3184fc99c3d6SVu Pham
/* Assign match metadata to every vport.  No-op (returns 0) unless
 * metadata matching is enabled.  On failure, metadata already assigned
 * to earlier vports is freed before returning the error.
 */
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	/* Safe on a partially initialized set: cleanup skips vports
	 * whose default_metadata is still zero.
	 */
	esw_offloads_metadata_uninit(esw);
	return err;
}
3206fc99c3d6SVu Pham
/* Create the offloads-mode ingress and egress ACL tables for @vport.
 * On egress failure the already-created ingress table is cleaned up.
 * Returns 0 or a negative errno.
 */
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}
322789a0f1fbSParav Pandit
/* Destroy @vport's ACL tables in reverse setup order: egress first,
 * then ingress.
 */
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}
323589a0f1fbSParav Pandit
/* Create ACL tables for the uplink vport and the eswitch manager
 * vport.  On failure after the uplink tables exist, they are torn down
 * before returning the error.
 */
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *uplink, *manager;
	int ret;

	uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(uplink))
		return PTR_ERR(uplink);

	ret = esw_vport_create_offloads_acl_tables(esw, uplink);
	if (ret)
		return ret;

	manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(manager)) {
		ret = PTR_ERR(manager);
		goto err_manager;
	}

	ret = esw_vport_create_offloads_acl_tables(esw, manager);
	if (ret)
		goto err_manager;

	return 0;

err_manager:
	esw_vport_destroy_offloads_acl_tables(esw, uplink);
	return ret;
}
326534413460SBodong Wang
/* Destroy the special-vport ACL tables in reverse creation order:
 * manager vport first, then uplink.  Vport lookups may fail (IS_ERR),
 * in which case that vport is simply skipped.
 */
static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (!IS_ERR(vport))
		esw_vport_destroy_offloads_acl_tables(esw, vport);

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (!IS_ERR(vport))
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}
327818486737SEli Britstein
/* Reload all currently loaded ETH representors, uplink first.  Only
 * meaningful in switchdev (OFFLOADS) mode; returns 0 when there is
 * nothing to do or when a non-uplink rep fails (those errors are
 * deliberately not propagated).
 */
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	/* Bail out if the uplink rep is not loaded. */
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	/* Uplink rep must load first; its failure aborts the reload.
	 * NOTE(review): the loop below visits the uplink rep again -
	 * presumably rep_load tolerates an already-loaded rep; confirm.
	 */
	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}
3303db202995SMark Bloch
/* Create every steering object offloads mode needs: the indirection
 * table, special-vport ACLs, offloads table, restore table, FDB tables,
 * and the vport RX group plus its drop group and drop rule.  Any
 * failure unwinds everything created so far, in reverse order.
 */
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

	/* Each label undoes the steps completed before its failure. */
create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
3369eca8cc38SBodong Wang
/* Destroy everything esw_offloads_steering_init() created, in strict
 * reverse creation order.
 */
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
3382eca8cc38SBodong Wang
/* Apply a change in the number of host VFs reported by a
 * query_esw_functions output (@out): unload all VF vports when VFs went
 * away, or load the new set when VFs appeared.  Runs under the devlink
 * lock.
 */
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	struct devlink *devlink;
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	/* No change, or the host PF is disabled - ignore the event. */
	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	devlink = priv_to_devlink(esw->dev);
	devl_lock(devlink);
	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err) {
			/* Leave num_vfs untouched on load failure. */
			devl_unlock(devlink);
			return;
		}
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	devl_unlock(devlink);
}
3416a3888f33SBodong Wang
/* Workqueue handler: query the current eswitch function state from the
 * device and apply any change in the number of host VFs.  The work item
 * is one-shot and is always freed on exit.
 */
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
3435a3888f33SBodong Wang
/* Notifier callback for "eswitch functions changed" firmware events.
 * Defers the actual handling to the eswitch workqueue; the GFP_ATOMIC
 * allocation indicates this may run in atomic context.  Returns
 * NOTIFY_DONE if the work item cannot be allocated, NOTIFY_OK otherwise.
 */
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	/* Recover the owning eswitch from the embedded notifier block. */
	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
3456a3888f33SBodong Wang
mlx5_esw_host_number_init(struct mlx5_eswitch * esw)3457a53cf949SParav Pandit static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3458a53cf949SParav Pandit {
3459a53cf949SParav Pandit const u32 *query_host_out;
3460a53cf949SParav Pandit
3461a53cf949SParav Pandit if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3462a53cf949SParav Pandit return 0;
3463a53cf949SParav Pandit
3464a53cf949SParav Pandit query_host_out = mlx5_esw_query_functions(esw->dev);
3465a53cf949SParav Pandit if (IS_ERR(query_host_out))
3466a53cf949SParav Pandit return PTR_ERR(query_host_out);
3467a53cf949SParav Pandit
3468a53cf949SParav Pandit /* Mark non local controller with non zero controller number. */
3469a53cf949SParav Pandit esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3470a53cf949SParav Pandit host_params_context.host_number);
3471a53cf949SParav Pandit kvfree(query_host_out);
3472a53cf949SParav Pandit return 0;
3473a53cf949SParav Pandit }
3474a53cf949SParav Pandit
mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch * esw,u32 controller)3475f1b9acd3SParav Pandit bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3476f1b9acd3SParav Pandit {
3477f1b9acd3SParav Pandit /* Local controller is always valid */
3478f1b9acd3SParav Pandit if (controller == 0)
3479f1b9acd3SParav Pandit return true;
3480f1b9acd3SParav Pandit
3481f1b9acd3SParav Pandit if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3482f1b9acd3SParav Pandit return false;
3483f1b9acd3SParav Pandit
3484f1b9acd3SParav Pandit /* External host number starts with zero in device */
3485f1b9acd3SParav Pandit return (controller == esw->offloads.host_number + 1);
3486f1b9acd3SParav Pandit }
3487f1b9acd3SParav Pandit
/* Bring the eswitch into offloads (switchdev) operation: RoCE, match
 * metadata, the reg_c0 mapping pool, steering objects, representors and
 * finally the PF/VF vports.  Any failure unwinds in reverse order.
 * Returns 0 or a negative errno.
 */
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	u64 mapping_id;
	int err;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	/* Mapping id derives from the system image GUID - presumably so
	 * functions of the same device share one mapping space; confirm
	 * against mapping_create_for_id().
	 */
	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
						sizeof(struct mlx5_mapped_obj),
						ESW_REG_C0_USER_DATA_METADATA_MASK,
						true);

	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
	if (mlx5_core_ec_sriov_enabled(esw->dev))
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
			vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	return 0;

	/* Unwind mirrors the setup sequence above, newest step first. */
err_vports:
	mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
3561c930a3adSOr Gerlitz
/* Switch the eswitch from switchdev (offloads) back to legacy mode.
 * The offloads configuration has already been torn down by the caller;
 * here we only flip the mode and, when SR-IOV is active on a PF,
 * re-create the legacy FDB by re-enabling the eswitch in legacy mode.
 */
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_LEGACY;

	/* If changing from switchdev to legacy mode without sriov enabled,
	 * no need to create legacy fdb.
	 */
	if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
		return 0;

	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");

	return err;
}
3581c930a3adSOr Gerlitz
/* Tear down all offloads-mode state.  The order mirrors the error
 * unwind of the enable path: vports, uplink rep, vport-metadata
 * passing, steering tables, reg_c0 mapping pool, metadata, RoCE,
 * and finally the termination-table mutex.
 */
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_disable_pf_vf_vports(esw);
	mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
}
3593c930a3adSOr Gerlitz
/* Translate a devlink eswitch mode into the mlx5 internal mode.
 * Returns 0 on success, -EINVAL for an unrecognized devlink mode.
 */
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		return 0;
	}

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		return 0;
	}

	return -EINVAL;
}
3609c930a3adSOr Gerlitz
/* Translate the mlx5 internal eswitch mode into the devlink mode.
 * Returns 0 on success, -EINVAL for an unrecognized internal mode.
 */
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	if (mlx5_mode == MLX5_ESWITCH_LEGACY) {
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		return 0;
	}

	if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) {
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		return 0;
	}

	return -EINVAL;
}
3625ef78618bSOr Gerlitz
/* Translate a devlink inline mode into the mlx5 min-inline mode.
 * Returns 0 on success, -EINVAL for an unrecognized devlink value.
 */
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	u8 converted;

	if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
		converted = MLX5_INLINE_MODE_NONE;
	else if (mode == DEVLINK_ESWITCH_INLINE_MODE_LINK)
		converted = MLX5_INLINE_MODE_L2;
	else if (mode == DEVLINK_ESWITCH_INLINE_MODE_NETWORK)
		converted = MLX5_INLINE_MODE_IP;
	else if (mode == DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT)
		converted = MLX5_INLINE_MODE_TCP_UDP;
	else
		return -EINVAL;

	*mlx5_mode = converted;
	return 0;
}
3647bffaa916SRoi Dayan
/* Translate the mlx5 min-inline mode into the devlink inline mode.
 * Returns 0 on success, -EINVAL for an unrecognized internal value.
 */
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	u8 converted;

	if (mlx5_mode == MLX5_INLINE_MODE_NONE)
		converted = DEVLINK_ESWITCH_INLINE_MODE_NONE;
	else if (mlx5_mode == MLX5_INLINE_MODE_L2)
		converted = DEVLINK_ESWITCH_INLINE_MODE_LINK;
	else if (mlx5_mode == MLX5_INLINE_MODE_IP)
		converted = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
	else if (mlx5_mode == MLX5_INLINE_MODE_TCP_UDP)
		converted = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
	else
		return -EINVAL;

	*mode = converted;
	return 0;
}
3669bffaa916SRoi Dayan
/* Forbid eswitch mode changes (e.g. while IPsec SAs/policies exist).
 * Uses mlx5_esw_try_lock() rather than a plain down_write() so that a
 * concurrent mode change causes a failure instead of blocking here.
 * Returns 0 on success (or when the eswitch is not available), or a
 * negative errno if the lock could not be taken.
 */
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	if (!mlx5_esw_allowed(esw))
		return 0;

	/* Take TC into account */
	err = mlx5_esw_try_lock(esw);
	if (err < 0)
		return err;

	esw->offloads.num_block_mode++;
	mlx5_esw_unlock(esw);
	return 0;
}
3687366e4624SJianbo Liu
/* Drop one reference taken by mlx5_eswitch_block_mode(); mode changes
 * become possible again once num_block_mode reaches zero.
 */
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!mlx5_esw_allowed(esw))
		return;

	down_write(&esw->mode_lock);
	esw->offloads.num_block_mode--;
	up_write(&esw->mode_lock);
}
3699366e4624SJianbo Liu
/* devlink callback: switch the eswitch between legacy and switchdev
 * modes.  Serialized against LAG reconfiguration and other mode
 * operations; mode_lock is dropped for the long-running start/stop work
 * while eswitch_operation_in_progress keeps other operations out.
 */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	/* LAG changes must not race with a mode change. */
	mlx5_lag_disable_change(esw->dev);
	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		goto enable_lag;
	}
	/* On success mlx5_esw_try_lock() returns the current mode. */
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (esw->offloads.num_block_mode) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't change eswitch mode when IPsec SA and/or policies are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	/* Drop mode_lock for the slow part; the flag below keeps other
	 * mode operations away in the meantime.
	 */
	esw->eswitch_operation_in_progress = true;
	up_write(&esw->mode_lock);

	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't change mode while devlink traps are active");
			err = -EOPNOTSUPP;
			goto skip;
		}
		err = esw_offloads_start(esw, extack);
	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		err = esw_offloads_stop(esw, extack);
		mlx5_rescan_drivers(esw->dev);
	} else {
		err = -EINVAL;
	}

skip:
	down_write(&esw->mode_lock);
	esw->eswitch_operation_in_progress = false;
unlock:
	mlx5_esw_unlock(esw);
enable_lag:
	mlx5_lag_enable_change(esw->dev);
	return err;
}
3761feae9087SOr Gerlitz
/* devlink callback: report the current eswitch mode. */
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_get(devlink);

	if (IS_ERR(esw))
		return PTR_ERR(esw);

	return esw_mode_to_devlink(esw->mode, mode);
}
3772127ea380SHadar Hen Zion
/* Program @mlx5_mode as the NIC-vport min-inline mode on all host
 * function VFs and, when EC SR-IOV is enabled, on all EC VFs too.
 * On any failure, every vport already updated is rolled back to
 * esw->offloads.inline_mode; @err_vport_num marks where to stop.
 *
 * NOTE(review): after an EC-VF failure the code falls through from the
 * EC rollback into the host-function rollback, which relies on the EC
 * vport number not matching any host-function vport number (otherwise
 * the break fires early) — confirm the numbering spaces are disjoint.
 */
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
			err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
			if (err) {
				err_vport_num = vport->vport;
				NL_SET_ERR_MSG_MOD(extack,
						   "Failed to set min inline on vport");
				goto revert_ec_vf_inline_mode;
			}
		}
	}
	return 0;

revert_ec_vf_inline_mode:
	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}
382247dd7e60SParav Pandit
/* devlink callback: set the eswitch min-inline mode.
 * Only valid when the device requires per-vport-context inline config;
 * rejected while flows are offloaded.  mode_lock is dropped while the
 * vports are programmed, with eswitch_operation_in_progress keeping
 * concurrent mode changes away.
 */
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
			err = 0;
			goto out;
		}

		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	/* Drop mode_lock while programming the vports; the flag keeps
	 * concurrent mode operations out in the meantime.
	 */
	esw->eswitch_operation_in_progress = true;
	up_write(&esw->mode_lock);

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (!err)
		esw->offloads.inline_mode = mlx5_mode;

	down_write(&esw->mode_lock);
	esw->eswitch_operation_in_progress = false;
	up_write(&esw->mode_lock);
	/* Bug fix: propagate the vport programming error.  The previous
	 * code returned 0 unconditionally here, so a failed (and rolled
	 * back) inline-mode change was reported to devlink as success.
	 */
	return err;

out:
	up_write(&esw->mode_lock);
	return err;
}
3880bffaa916SRoi Dayan
/* devlink callback: report the current eswitch min-inline mode. */
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_get(devlink);

	if (IS_ERR(esw))
		return PTR_ERR(esw);

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
3891bffaa916SRoi Dayan
/* Forbid enabling encap mode (e.g. while IPsec offload is configured).
 * Fails (returns false) when the eswitch is already in a non-legacy
 * mode with encap enabled; otherwise bumps the blocker count and
 * returns true.  Always succeeds when the eswitch is not available.
 */
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!mlx5_esw_allowed(esw))
		return true;

	down_write(&esw->mode_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		up_write(&esw->mode_lock);
		return false;
	}

	esw->offloads.num_block_encap++;
	up_write(&esw->mode_lock);
	return true;
}
3910acc10929SLeon Romanovsky
/* Drop one reference taken by mlx5_eswitch_block_encap(); changing the
 * encap mode becomes possible again once num_block_encap reaches zero.
 */
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!mlx5_esw_allowed(esw))
		return;

	down_write(&esw->mode_lock);
	esw->offloads.num_block_encap--;
	up_write(&esw->mode_lock);
}
3922acc10929SLeon Romanovsky
/* devlink callback: set the eswitch encap offload mode.
 * In legacy mode the value is only recorded; in switchdev mode the fast
 * FDB tables must be re-created with the new setting, which is done
 * with mode_lock dropped while eswitch_operation_in_progress guards
 * against concurrent mode operations.
 */
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	/* Enabling encap needs both reformat and decap FW capabilities. */
	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	/* Only NONE and BASIC encap modes are supported. */
	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->offloads.num_block_encap) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when IPsec SA and/or policies are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	/* Drop mode_lock for the FDB rebuild; the flag keeps concurrent
	 * mode operations out in the meantime.
	 */
	esw->eswitch_operation_in_progress = true;
	up_write(&esw->mode_lock);

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		/* Best-effort rollback to the previous setting (only NONE
		 * and BASIC are reachable here, so !encap is the inverse);
		 * the re-create result is deliberately ignored.
		 */
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

	down_write(&esw->mode_lock);
	esw->eswitch_operation_in_progress = false;

unlock:
	up_write(&esw->mode_lock);
	return err;
}
39947768d197SRoi Dayan
mlx5_devlink_eswitch_encap_mode_get(struct devlink * devlink,enum devlink_eswitch_encap_mode * encap)399598fdbea5SLeon Romanovsky int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
399698fdbea5SLeon Romanovsky enum devlink_eswitch_encap_mode *encap)
39977768d197SRoi Dayan {
3998bd939753SParav Pandit struct mlx5_eswitch *esw;
39997768d197SRoi Dayan
4000bd939753SParav Pandit esw = mlx5_devlink_eswitch_get(devlink);
4001bd939753SParav Pandit if (IS_ERR(esw))
4002bd939753SParav Pandit return PTR_ERR(esw);
4003bd939753SParav Pandit
40047768d197SRoi Dayan *encap = esw->offloads.encap;
4005f019679eSChris Mi return 0;
40067768d197SRoi Dayan }
40077768d197SRoi Dayan
4008c2d7712cSBodong Wang static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch * esw,u16 vport_num)4009c2d7712cSBodong Wang mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
4010c2d7712cSBodong Wang {
4011c2d7712cSBodong Wang /* Currently, only ECPF based device has representor for host PF. */
4012c2d7712cSBodong Wang if (vport_num == MLX5_VPORT_PF &&
4013c2d7712cSBodong Wang !mlx5_core_is_ecpf_esw_manager(esw->dev))
4014c2d7712cSBodong Wang return false;
4015c2d7712cSBodong Wang
4016c2d7712cSBodong Wang if (vport_num == MLX5_VPORT_ECPF &&
4017c2d7712cSBodong Wang !mlx5_ecpf_vport_exists(esw->dev))
4018c2d7712cSBodong Wang return false;
4019c2d7712cSBodong Wang
4020c2d7712cSBodong Wang return true;
4021c2d7712cSBodong Wang }
4022c2d7712cSBodong Wang
/* Register representor ops of @rep_type and mark every vport that can
 * have a representor as REP_REGISTERED.  The reps are actually loaded
 * later, when the eswitch enters offloads mode.
 */
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
40419deb2241SOr Gerlitz
/* Unregister all representors of @rep_type: unload them first if the
 * eswitch is currently in offloads mode, then mark every rep of this
 * type as REP_UNREGISTERED.
 */
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
4054726293f1SHadar Hen Zion
/* Return the private data stored on the uplink vport's representor for
 * the given rep type.
 */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *uplink_rep;

	uplink_rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return uplink_rep->rep_data[rep_type].priv;
}
406222215908SMark Bloch
/* Return the protocol device of @vport's representor for @rep_type,
 * or NULL if the rep is not loaded or the ops provide no accessor.
 */
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) != REP_LOADED)
		return NULL;
	if (!esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return NULL;

	return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
407722215908SMark Bloch
/* Shorthand for fetching the protocol device of the uplink vport rep. */
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
408357cbd893SMark Bloch
/* Thin exported accessor: return @vport's representor struct. */
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
409091d6291cSParav Pandit
mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch * esw)40915b7cb745SPaul Blakey bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
40925b7cb745SPaul Blakey {
40935b7cb745SPaul Blakey return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
40945b7cb745SPaul Blakey }
40955b7cb745SPaul Blakey EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
40965b7cb745SPaul Blakey
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch * esw)40977445cfb1SJianbo Liu bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
40987445cfb1SJianbo Liu {
40997445cfb1SJianbo Liu return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
41007445cfb1SJianbo Liu }
41017445cfb1SJianbo Liu EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
41027445cfb1SJianbo Liu
/* Return @vport_num's metadata value shifted into the source-port
 * metadata bit range, for matching on reg_c0.
 * Returns 0 (and warns once) if the vport lookup fails.
 */
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
4114d970812bSParav Pandit
/* Query the vhca_id of another function (@vport_num) from its general
 * HCA capabilities.  *vhca_id is zeroed first so it is well-defined
 * even on failure.  Returns 0 on success, -ENOMEM or the query error.
 */
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}
413984ae9c1fSVlad Buslov
/* Record the vhca_id -> vport_num mapping for @vport_num in the
 * offloads vhca_map xarray.  Any previous entry stored under the same
 * vhca_id is replaced and freed.
 */
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}
416584ae9c1fSVlad Buslov
/* Remove @vport_num's vhca_id mapping from the vhca_map xarray and
 * free the stored entry.
 * NOTE(review): when the vhca_id query fails, vhca_id is left at 0 and
 * the erase below still runs, which would drop whatever entry is stored
 * at index 0 — confirm 0 is never a valid vhca_id, or that this case
 * cannot occur in practice.
 */
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}
417984ae9c1fSVlad Buslov
/* Translate @vhca_id into its owning vport number using the offloads
 * vhca_map xarray.  Fills @vport_num and returns 0 on success, or
 * -ENOENT when no mapping exists for @vhca_id.
 */
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *entry;

	entry = xa_load(&esw->offloads.vhca_map, vhca_id);
	if (!entry)
		return -ENOENT;

	*vport_num = *entry;
	return 0;
}
419010742efcSVlad Buslov
mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch * esw,u16 vport_num)419110742efcSVlad Buslov u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
419210742efcSVlad Buslov u16 vport_num)
419310742efcSVlad Buslov {
419410742efcSVlad Buslov struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
419510742efcSVlad Buslov
419610742efcSVlad Buslov if (WARN_ON_ONCE(IS_ERR(vport)))
419710742efcSVlad Buslov return 0;
419810742efcSVlad Buslov
419910742efcSVlad Buslov return vport->metadata;
420010742efcSVlad Buslov }
420110742efcSVlad Buslov EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
4202e9d491a6SParav Pandit
/* devlink port-function hw_addr get callback: copy the vport MAC address
 * out under the eswitch state lock.  Always succeeds.
 */
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}
4216e9d491a6SParav Pandit
mlx5_devlink_port_fn_hw_addr_set(struct devlink_port * port,const u8 * hw_addr,int hw_addr_len,struct netlink_ext_ack * extack)421771c93e37SJiri Pirko int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
4218e9d491a6SParav Pandit const u8 *hw_addr, int hw_addr_len,
4219e9d491a6SParav Pandit struct netlink_ext_ack *extack)
4220e9d491a6SParav Pandit {
42215c632cc3SJiri Pirko struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
42227d833520SJiri Pirko struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4223e9d491a6SParav Pandit
42247d833520SJiri Pirko return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
42257db98396SYishai Hadas }
42267db98396SYishai Hadas
/* devlink port-function migratable get callback: report the cached
 * mig_enabled state for this vport.  Fails with -EOPNOTSUPP when the
 * device lacks the migration or VHCA resource manager capability.
 */
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);

	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
		return -EOPNOTSUPP;
	}

	/* Read the cached state under the lock that set() updates it under. */
	mutex_lock(&esw->state_lock);
	*is_enabled = vport->info.mig_enabled;
	mutex_unlock(&esw->state_lock);
	return 0;
}
4248e5b9642aSShay Drory
/* devlink port-function migratable set callback: read-modify-write the
 * other function's GENERAL_2 HCA caps to flip the migratable bit, then
 * cache the new state in vport->info.mig_enabled.
 *
 * Returns 0 on success (including when already at the requested state),
 * -EOPNOTSUPP when a required device capability is missing, -ENOMEM on
 * allocation failure, or the error from the cap query/set commands.
 */
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	/* Both capabilities are prerequisites for modifying migration state. */
	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
		return -EOPNOTSUPP;
	}

	mutex_lock(&esw->state_lock);

	/* No firmware round-trip if already at the requested state. */
	if (vport->info.mig_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	/* Fetch the current GENERAL_2 caps of the function behind this vport. */
	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);

	/* Write back the modified caps. */
	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
		goto out_free;
	}

	/* Cache the new state; the get() callback reads it under the same lock. */
	vport->info.mig_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}
4307e5b9642aSShay Drory
/* devlink port-function roce get callback: report the cached roce_enabled
 * state for this vport.  Fails with -EOPNOTSUPP when the device lacks the
 * VHCA resource manager capability.
 */
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
		return -EOPNOTSUPP;
	}

	/* Read the cached state under the lock that set() updates it under. */
	mutex_lock(&esw->state_lock);
	*is_enabled = vport->info.roce_enabled;
	mutex_unlock(&esw->state_lock);
	return 0;
}
43247db98396SYishai Hadas
/* devlink port-function roce set callback: read-modify-write the other
 * function's GENERAL HCA caps to flip the roce bit, then cache the new
 * state in vport->info.roce_enabled.
 *
 * Returns 0 on success (including when already at the requested state),
 * -EOPNOTSUPP when the VHCA resource manager capability is missing,
 * -ENOMEM on allocation failure, or the error from the cap query/set
 * commands.
 */
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	u16 vport_num = vport->vport;
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
		return -EOPNOTSUPP;
	}

	mutex_lock(&esw->state_lock);

	/* No firmware round-trip if already at the requested state. */
	if (vport->info.roce_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	/* Fetch the current GENERAL caps of the function behind this vport. */
	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);

	/* Write back the modified caps. */
	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
		goto out_free;
	}

	/* Cache the new state; the get() callback reads it under the same lock. */
	vport->info.roce_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}
4379d1569537SJianbo Liu
4380d1569537SJianbo Liu int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch * esw,struct mlx5_flow_handle * rule,struct mlx5_esw_flow_attr * esw_attr,int attr_idx)4381d1569537SJianbo Liu mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
4382d1569537SJianbo Liu struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
4383d1569537SJianbo Liu {
4384d1569537SJianbo Liu struct mlx5_flow_destination new_dest = {};
4385d1569537SJianbo Liu struct mlx5_flow_destination old_dest = {};
4386d1569537SJianbo Liu
4387d1569537SJianbo Liu if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
4388d1569537SJianbo Liu return 0;
4389d1569537SJianbo Liu
4390d1569537SJianbo Liu esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4391d1569537SJianbo Liu esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4392d1569537SJianbo Liu
4393d1569537SJianbo Liu return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
4394d1569537SJianbo Liu }
439506bab696SDima Chumak
439606bab696SDima Chumak #ifdef CONFIG_XFRM_OFFLOAD
/* devlink port-function ipsec_crypto get callback: report the cached
 * ipsec_crypto_enabled state for this vport.  Fails with -EOPNOTSUPP when
 * VF IPsec offload is unsupported or the vport is not enabled.
 */
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
		return -EOPNOTSUPP;
	}

	vport = mlx5_devlink_port_vport_get(port);

	mutex_lock(&esw->state_lock);
	if (vport->enabled)
		*is_enabled = vport->info.ipsec_crypto_enabled;
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&esw->state_lock);
	return err;
}
442606bab696SDima Chumak
/* devlink port-function ipsec_crypto set callback: toggle IPsec crypto
 * offload for the VF behind this port and maintain the eswitch-wide
 * enabled_ipsec_vf_count accounting.
 *
 * Returns 0 on success (including when already at the requested state),
 * -EOPNOTSUPP when unsupported or the vport is disabled, -EBUSY when the
 * device already has IPsec offloads in use and no VF had it enabled yet,
 * or the error from the offload-set helper.
 */
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;
	int err;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	/* Capability check is per-vport, hence done via the vport number. */
	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Device doesn't support IPsec crypto");
		return err;
	}

	vport = mlx5_devlink_port_vport_get(port);

	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto unlock;
	}

	/* No-op if already at the requested state (err still 0 from above). */
	if (vport->info.ipsec_crypto_enabled == enable)
		goto unlock;

	/* Refuse to flip the first VF while the device itself already has
	 * IPsec offloads in use.
	 */
	if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
		err = -EBUSY;
		goto unlock;
	}

	err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
		goto unlock;
	}

	/* Cache new state and keep the per-eswitch VF counter in sync. */
	vport->info.ipsec_crypto_enabled = enable;
	if (enable)
		esw->enabled_ipsec_vf_count++;
	else
		esw->enabled_ipsec_vf_count--;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
4479b691b111SDima Chumak
/* devlink port-function ipsec_packet get callback: report the cached
 * ipsec_packet_enabled state for this vport.  Fails with -EOPNOTSUPP when
 * VF IPsec offload is unsupported or the vport is not enabled.
 */
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
		return -EOPNOTSUPP;
	}

	vport = mlx5_devlink_port_vport_get(port);

	mutex_lock(&esw->state_lock);
	if (vport->enabled)
		*is_enabled = vport->info.ipsec_packet_enabled;
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&esw->state_lock);
	return err;
}
4509b691b111SDima Chumak
/* devlink port-function ipsec_packet set callback: toggle IPsec packet
 * (full offload) mode for the VF behind this port and maintain the
 * eswitch-wide enabled_ipsec_vf_count accounting.
 *
 * Returns 0 on success (including when already at the requested state),
 * -EOPNOTSUPP when unsupported or the vport is disabled, -EBUSY when the
 * device already has IPsec offloads in use and no VF had it enabled yet,
 * or the error from the offload-set helper.
 */
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
					  bool enable,
					  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;
	int err;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	/* Capability check is per-vport, hence done via the vport number. */
	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Device doesn't support IPsec packet mode");
		return err;
	}

	vport = mlx5_devlink_port_vport_get(port);
	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto unlock;
	}

	/* No-op if already at the requested state (err still 0 from above). */
	if (vport->info.ipsec_packet_enabled == enable)
		goto unlock;

	/* Refuse to flip the first VF while the device itself already has
	 * IPsec offloads in use.
	 */
	if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
		err = -EBUSY;
		goto unlock;
	}

	err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to set IPsec packet mode");
		goto unlock;
	}

	/* Cache new state and keep the per-eswitch VF counter in sync. */
	vport->info.ipsec_packet_enabled = enable;
	if (enable)
		esw->enabled_ipsec_vf_count++;
	else
		esw->enabled_ipsec_vf_count--;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
456306bab696SDima Chumak #endif /* CONFIG_XFRM_OFFLOAD */
4564