1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/idr.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/mlx5_ifc.h>
37 #include <linux/mlx5/vport.h>
38 #include <linux/mlx5/fs.h>
39 #include "mlx5_core.h"
40 #include "eswitch.h"
41 #include "esw/indir_table.h"
42 #include "esw/acl/ofld.h"
43 #include "rdma.h"
44 #include "en.h"
45 #include "fs_core.h"
46 #include "lib/devcom.h"
47 #include "lib/eq.h"
48 #include "lib/fs_chains.h"
49 #include "en_tc.h"
50 #include "en/mapping.h"
51 #include "devlink.h"
52 #include "lag/lag.h"
53 #include "en/tc/post_meter.h"
54
55 #define mlx5_esw_for_each_rep(esw, i, rep) \
56 xa_for_each(&((esw)->offloads.vport_reps), i, rep)
57
58 /* There are two match-all miss flows, one for unicast dst mac and
59 * one for multicast.
60 */
61 #define MLX5_ESW_MISS_FLOWS (2)
62 #define UPLINK_REP_INDEX 0
63
64 #define MLX5_ESW_VPORT_TBL_SIZE 128
65 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
66
67 #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
68
69 static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
70 .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
71 .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
72 .flags = 0,
73 };
74
75 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
76 u16 vport_num)
77 {
78 return xa_load(&esw->offloads.vport_reps, vport_num);
79 }
80
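/* Hint to firmware where this rule's traffic originates: rules for an
 * internal port use the int_port flow source, uplink reps are marked as
 * uplink traffic and all other reps as local vport traffic. Skipped when
 * the flow_source capability is absent or there is no in_rep.
 */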
81 static void
82 mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
83 struct mlx5_flow_spec *spec,
84 struct mlx5_esw_flow_attr *attr)
85 {
86 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
87 return;
88
89 if (attr->int_port) {
90 spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
91
92 return;
93 }
94
95 spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
96 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
97 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
98 }
99
100 /* Only the upper 16 bits of reg c0 need to be cleared, but since the lower 16 bits
101 * are not used by the subsequent processing either, clear them all for simplicity.
102 */
103 void
104 mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
105 {
106 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
107 void *misc2;
108
109 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
110 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
111
112 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
113 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
114
115 if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
116 spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
117 }
118 }
119
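/* Add a source-port match to @spec: with vport match metadata enabled the
 * match is on metadata_reg_c_0 (int_port metadata for chain 0 rules with
 * an internal in_port, otherwise the vport metadata of @src_esw); without
 * it the match is on the misc source_port, plus the owner vhca_id on
 * merged eswitches.
 */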
120 static void
121 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
122 struct mlx5_flow_spec *spec,
123 struct mlx5_flow_attr *attr,
124 struct mlx5_eswitch *src_esw,
125 u16 vport)
126 {
127 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
128 u32 metadata;
129 void *misc2;
130 void *misc;
131
132 /* Use metadata matching because a vport is not represented by a single
133 * VHCA in dual-port RoCE mode, and matching on the source vport may fail.
134 */
135 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
136 if (mlx5_esw_indir_table_decap_vport(attr))
137 vport = mlx5_esw_indir_table_decap_vport(attr);
138
139 if (!attr->chain && esw_attr && esw_attr->int_port)
140 metadata =
141 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
142 else
143 metadata =
144 mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);
145
146 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
147 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);
148
149 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
150 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
151 mlx5_eswitch_get_vport_metadata_mask());
152
153 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
154 } else {
155 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
156 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
157
158 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
159 MLX5_SET(fte_match_set_misc, misc,
160 source_eswitch_owner_vhca_id,
161 MLX5_CAP_GEN(src_esw->dev, vhca_id));
162
163 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
164 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
165 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
166 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
167 source_eswitch_owner_vhca_id);
168
169 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
170 }
171 }
172
173 static int
174 esw_setup_decap_indir(struct mlx5_eswitch *esw,
175 struct mlx5_flow_attr *attr)
176 {
177 struct mlx5_flow_table *ft;
178
179 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
180 return -EOPNOTSUPP;
181
182 ft = mlx5_esw_indir_table_get(esw, attr,
183 mlx5_esw_indir_table_decap_vport(attr), true);
184 return PTR_ERR_OR_ZERO(ft);
185 }
186
187 static void
188 esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
189 struct mlx5_flow_attr *attr)
190 {
191 if (mlx5_esw_indir_table_decap_vport(attr))
192 mlx5_esw_indir_table_put(esw,
193 mlx5_esw_indir_table_decap_vport(attr),
194 true);
195 }
196
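/* Program a packet-length range destination for MTU policing: packets up
 * to the meter MTU hit the post-meter "true" table, larger packets miss
 * into the "false" table.
 */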
197 static int
198 esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
199 struct mlx5e_meter_attr *meter,
200 int i)
201 {
202 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
203 dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
204 dest[i].range.min = 0;
205 dest[i].range.max = meter->params.mtu;
206 dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
207 dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
208
209 return 0;
210 }
211
212 static int
213 esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
214 struct mlx5_flow_act *flow_act,
215 u32 sampler_id,
216 int i)
217 {
218 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
219 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
220 dest[i].sampler_id = sampler_id;
221
222 return 0;
223 }
224
225 static int
226 esw_setup_ft_dest(struct mlx5_flow_destination *dest,
227 struct mlx5_flow_act *flow_act,
228 struct mlx5_eswitch *esw,
229 struct mlx5_flow_attr *attr,
230 int i)
231 {
232 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
233 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
234 dest[i].ft = attr->dest_ft;
235
236 if (mlx5_esw_indir_table_decap_vport(attr))
237 return esw_setup_decap_indir(esw, attr);
238 return 0;
239 }
240
241 static void
242 esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
243 struct mlx5_fs_chains *chains, int i)
244 {
245 if (mlx5_chains_ignore_flow_level_supported(chains))
246 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
247 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
248 dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
249 }
250
251 static void
252 esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
253 struct mlx5_eswitch *esw, int i)
254 {
255 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
256 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
257 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
258 dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
259 }
260
261 static int
262 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
263 struct mlx5_flow_act *flow_act,
264 struct mlx5_fs_chains *chains,
265 u32 chain, u32 prio, u32 level,
266 int i)
267 {
268 struct mlx5_flow_table *ft;
269
270 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
271 ft = mlx5_chains_get_table(chains, chain, prio, level);
272 if (IS_ERR(ft))
273 return PTR_ERR(ft);
274
275 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
276 dest[i].ft = ft;
277 return 0;
278 }
279
280 static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
281 int from, int to)
282 {
283 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
284 struct mlx5_fs_chains *chains = esw_chains(esw);
285 int i;
286
287 for (i = from; i < to; i++)
288 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
289 mlx5_chains_put_table(chains, 0, 1, 0);
290 else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
291 esw_attr->dests[i].mdev))
292 mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
293 }
294
295 static bool
296 esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
297 {
298 int i;
299
300 for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
301 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
302 return true;
303 return false;
304 }
305
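/* Forward to the destination chain while keeping source port rewrite
 * usable: only a single forward destination is supported, and the forward
 * destination's pkt_reformat (if any) is applied on this rule.
 */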
306 static int
307 esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
308 struct mlx5_flow_act *flow_act,
309 struct mlx5_eswitch *esw,
310 struct mlx5_fs_chains *chains,
311 struct mlx5_flow_attr *attr,
312 int *i)
313 {
314 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
315 int err;
316
317 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
318 return -EOPNOTSUPP;
319
320 /* Flow steering can't handle more than one destination with the same
321 * ft in a single flow.
322 */
323 if (esw_attr->out_count - esw_attr->split_count > 1)
324 return -EOPNOTSUPP;
325
326 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
327 if (err)
328 return err;
329
330 if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
331 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
332 flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
333 }
334 (*i)++;
335
336 return 0;
337 }
338
339 static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
340 struct mlx5_flow_attr *attr)
341 {
342 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
343
344 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
345 }
346
347 static bool
348 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
349 {
350 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
351 bool result = false;
352 int i;
353
354 /* An indirect table is supported only for flows whose in_port is the
355 * uplink and whose destination is a vport on the same eswitch as the
356 * uplink; return false if at least one destination doesn't meet this
357 * criterion.
358 */
359 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
360 if (esw_attr->dests[i].vport_valid &&
361 mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
362 esw_attr->dests[i].mdev)) {
363 result = true;
364 } else {
365 result = false;
366 break;
367 }
368 }
369 return result;
370 }
371
372 static int
373 esw_setup_indir_table(struct mlx5_flow_destination *dest,
374 struct mlx5_flow_act *flow_act,
375 struct mlx5_eswitch *esw,
376 struct mlx5_flow_attr *attr,
377 int *i)
378 {
379 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
380 int j, err;
381
382 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
383 return -EOPNOTSUPP;
384
385 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
386 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
387 dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
388
389 dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
390 esw_attr->dests[j].vport, false);
391 if (IS_ERR(dest[*i].ft)) {
392 err = PTR_ERR(dest[*i].ft);
393 goto err_indir_tbl_get;
394 }
395 }
396
397 if (mlx5_esw_indir_table_decap_vport(attr)) {
398 err = esw_setup_decap_indir(esw, attr);
399 if (err)
400 goto err_indir_tbl_get;
401 }
402
403 return 0;
404
405 err_indir_tbl_get:
406 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
407 return err;
408 }
409
410 static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
411 {
412 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
413
414 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
415 esw_cleanup_decap_indir(esw, attr);
416 }
417
418 static void
419 esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
420 {
421 mlx5_chains_put_table(chains, chain, prio, level);
422 }
423
424 static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
425 {
426 return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
427 }
428
429 static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
430 struct mlx5_esw_flow_attr *esw_attr,
431 int attr_idx)
432 {
433 if (esw->offloads.ft_ipsec_tx_pol &&
434 esw_attr->dests[attr_idx].vport_valid &&
435 esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
436 /* To stay aligned with software behavior, encryption is needed only for tunnel devices */
437 (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
438 esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
439 esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
440 return true;
441
442 return false;
443 }
444
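/* Validate the destination list against the IPsec TX policy table: reject
 * mirror (split) destinations that would need IPsec on the uplink, and
 * reject IPsec uplink forwarding combined with more than one forward
 * destination.
 */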
445 static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
446 struct mlx5_esw_flow_attr *esw_attr)
447 {
448 int i;
449
450 if (!esw->offloads.ft_ipsec_tx_pol)
451 return true;
452
453 for (i = 0; i < esw_attr->split_count; i++)
454 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
455 return false;
456
457 for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
458 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
459 (esw_attr->out_count - esw_attr->split_count > 1))
460 return false;
461
462 return true;
463 }
464
465 static void
466 esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
467 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
468 int attr_idx, int dest_idx, bool pkt_reformat)
469 {
470 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
471 dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
472 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
473 dest[dest_idx].vport.vhca_id =
474 MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
475 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
476 if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
477 mlx5_lag_is_mpesw(esw->dev))
478 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
479 }
480 if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
481 if (pkt_reformat) {
482 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
483 flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
484 }
485 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
486 dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
487 }
488 }
489
490 static void
491 esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
492 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
493 int attr_idx, int dest_idx, bool pkt_reformat)
494 {
495 dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
496 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
497 if (pkt_reformat &&
498 esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
499 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
500 flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
501 }
502 }
503
504 static void
505 esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
506 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
507 int attr_idx, int dest_idx, bool pkt_reformat)
508 {
509 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
510 esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
511 attr_idx, dest_idx, pkt_reformat);
512 else
513 esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
514 attr_idx, dest_idx, pkt_reformat);
515 }
516
517 static int
518 esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
519 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
520 int i)
521 {
522 int j;
523
524 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
525 esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
526 return i;
527 }
528
529 static bool
530 esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
531 {
532 return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
533 mlx5_eswitch_vport_match_metadata_enabled(esw) &&
534 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
535 }
536
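/* Return true when the destination list mixes internal destinations
 * (vports, or uplink with reformat) with external ones (plain uplink).
 */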
537 static bool
538 esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
539 {
540 bool internal_dest = false, external_dest = false;
541 int i;
542
543 for (i = 0; i < max_dest; i++) {
544 if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
545 dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
546 continue;
547
548 /* An uplink dest is external, but it is considered internal if there is
549 * a reformat, because firmware uses LB+hairpin to support it.
550 */
551 if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
552 !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
553 external_dest = true;
554 else
555 internal_dest = true;
556
557 if (internal_dest && external_dest)
558 return true;
559 }
560
561 return false;
562 }
563
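/* Fill the destination array from the flow attributes, one case per
 * attribute flag: slow path, sampler, accept, MTU check, indirect table,
 * chain src-port rewrite, or plain vport/flow-table/chain destinations.
 * Advances *i past the entries that were filled in.
 */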
564 static int
565 esw_setup_dests(struct mlx5_flow_destination *dest,
566 struct mlx5_flow_act *flow_act,
567 struct mlx5_eswitch *esw,
568 struct mlx5_flow_attr *attr,
569 struct mlx5_flow_spec *spec,
570 int *i)
571 {
572 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
573 struct mlx5_fs_chains *chains = esw_chains(esw);
574 int err = 0;
575
576 if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
577 esw_src_port_rewrite_supported(esw))
578 attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
579
580 if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
581 esw_setup_slow_path_dest(dest, flow_act, esw, *i);
582 (*i)++;
583 goto out;
584 }
585
586 if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
587 esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
588 (*i)++;
589 } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
590 esw_setup_accept_dest(dest, flow_act, chains, *i);
591 (*i)++;
592 } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
593 err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
594 (*i)++;
595 } else if (esw_is_indir_table(esw, attr)) {
596 err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
597 } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
598 err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
599 } else {
600 *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
601
602 if (attr->dest_ft) {
603 err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
604 (*i)++;
605 } else if (attr->dest_chain) {
606 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
607 1, 0, *i);
608 (*i)++;
609 }
610 }
611
612 out:
613 return err;
614 }
615
616 static void
617 esw_cleanup_dests(struct mlx5_eswitch *esw,
618 struct mlx5_flow_attr *attr)
619 {
620 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
621 struct mlx5_fs_chains *chains = esw_chains(esw);
622
623 if (attr->dest_ft) {
624 esw_cleanup_decap_indir(esw, attr);
625 } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
626 if (attr->dest_chain)
627 esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
628 else if (esw_is_indir_table(esw, attr))
629 esw_cleanup_indir_table(esw, attr);
630 else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
631 esw_cleanup_chain_src_port_rewrite(esw, attr);
632 }
633 }
634
635 static void
636 esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
637 {
638 struct mlx5e_flow_meter_handle *meter;
639
640 meter = attr->meter_attr.meter;
641 flow_act->exe_aso.type = attr->exe_aso_type;
642 flow_act->exe_aso.object_id = meter->obj_id;
643 flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
644 flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
645 /* use metadata reg 5 for packet color */
646 flow_act->exe_aso.return_reg_id = 5;
647 }
648
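/* Add an offloaded FDB rule: build the destination list, source port
 * match and VLAN/meter/modify-header actions from @attr, then insert the
 * rule either into the per-vport mirror table (split rules) or into the
 * chain/prio table (or attr->ft), going through a termination table when
 * one is required.
 */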
649 struct mlx5_flow_handle *
650 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
651 struct mlx5_flow_spec *spec,
652 struct mlx5_flow_attr *attr)
653 {
654 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
655 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
656 struct mlx5_fs_chains *chains = esw_chains(esw);
657 bool split = !!(esw_attr->split_count);
658 struct mlx5_vport_tbl_attr fwd_attr;
659 struct mlx5_flow_destination *dest;
660 struct mlx5_flow_handle *rule;
661 struct mlx5_flow_table *fdb;
662 int i = 0;
663
664 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
665 return ERR_PTR(-EOPNOTSUPP);
666
667 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
668 return ERR_PTR(-EOPNOTSUPP);
669
670 if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
671 return ERR_PTR(-EOPNOTSUPP);
672
673 dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
674 if (!dest)
675 return ERR_PTR(-ENOMEM);
676
677 flow_act.action = attr->action;
678
679 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
680 flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
681 flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
682 flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
683 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
684 flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
685 flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
686 flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
687 }
688 }
689
690 mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
691
692 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
693 int err;
694
695 err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
696 if (err) {
697 rule = ERR_PTR(err);
698 goto err_create_goto_table;
699 }
700
701 /* Header rewrite with combined wire+loopback in FDB is not allowed */
702 if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
703 esw_dests_to_int_external(dest, i)) {
704 esw_warn(esw->dev,
705 "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
706 rule = ERR_PTR(-EINVAL);
707 goto err_esw_get;
708 }
709 }
710
711 if (esw_attr->decap_pkt_reformat)
712 flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
713
714 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
715 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
716 dest[i].counter_id = mlx5_fc_id(attr->counter);
717 i++;
718 }
719
720 if (attr->outer_match_level != MLX5_MATCH_NONE)
721 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
722 if (attr->inner_match_level != MLX5_MATCH_NONE)
723 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
724
725 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
726 flow_act.modify_hdr = attr->modify_hdr;
727
728 if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
729 attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
730 esw_setup_meter(attr, &flow_act);
731
732 if (split) {
733 fwd_attr.chain = attr->chain;
734 fwd_attr.prio = attr->prio;
735 fwd_attr.vport = esw_attr->in_rep->vport;
736 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
737
738 fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
739 } else {
740 if (attr->chain || attr->prio)
741 fdb = mlx5_chains_get_table(chains, attr->chain,
742 attr->prio, 0);
743 else
744 fdb = attr->ft;
745
746 if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
747 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
748 esw_attr->in_mdev->priv.eswitch,
749 esw_attr->in_rep->vport);
750 }
751 if (IS_ERR(fdb)) {
752 rule = ERR_CAST(fdb);
753 goto err_esw_get;
754 }
755
756 if (!i) {
757 kfree(dest);
758 dest = NULL;
759 }
760
761 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
762 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
763 &flow_act, dest, i);
764 else
765 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
766 if (IS_ERR(rule))
767 goto err_add_rule;
768 else
769 atomic64_inc(&esw->offloads.num_flows);
770
771 kfree(dest);
772 return rule;
773
774 err_add_rule:
775 if (split)
776 mlx5_esw_vporttbl_put(esw, &fwd_attr);
777 else if (attr->chain || attr->prio)
778 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
779 err_esw_get:
780 esw_cleanup_dests(esw, attr);
781 err_create_goto_table:
782 kfree(dest);
783 return rule;
784 }
785
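/* Add the fast-path half of a split (mirror) rule: the mirror
 * destinations live in the chain/prio table and the rule then forwards to
 * the per-vport table, where the rest of the actions are applied.
 */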
786 struct mlx5_flow_handle *
787 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
788 struct mlx5_flow_spec *spec,
789 struct mlx5_flow_attr *attr)
790 {
791 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
792 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
793 struct mlx5_fs_chains *chains = esw_chains(esw);
794 struct mlx5_vport_tbl_attr fwd_attr;
795 struct mlx5_flow_destination *dest;
796 struct mlx5_flow_table *fast_fdb;
797 struct mlx5_flow_table *fwd_fdb;
798 struct mlx5_flow_handle *rule;
799 int i, err = 0;
800
801 dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
802 if (!dest)
803 return ERR_PTR(-ENOMEM);
804
805 fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
806 if (IS_ERR(fast_fdb)) {
807 rule = ERR_CAST(fast_fdb);
808 goto err_get_fast;
809 }
810
811 fwd_attr.chain = attr->chain;
812 fwd_attr.prio = attr->prio;
813 fwd_attr.vport = esw_attr->in_rep->vport;
814 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
815 fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
816 if (IS_ERR(fwd_fdb)) {
817 rule = ERR_CAST(fwd_fdb);
818 goto err_get_fwd;
819 }
820
821 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
822 for (i = 0; i < esw_attr->split_count; i++) {
823 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
824 /* Source port rewrite (forwarding to an OVS internal port or stack
825 * device) isn't supported in the split (mirror) part of the rule.
826 */
827 err = -EOPNOTSUPP;
828 else
829 esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
830
831 if (err) {
832 rule = ERR_PTR(err);
833 goto err_chain_src_rewrite;
834 }
835 }
836 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
837 dest[i].ft = fwd_fdb;
838 i++;
839
840 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
841 esw_attr->in_mdev->priv.eswitch,
842 esw_attr->in_rep->vport);
843
844 if (attr->outer_match_level != MLX5_MATCH_NONE)
845 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
846
847 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
848 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
849
850 if (IS_ERR(rule)) {
851 i = esw_attr->split_count;
852 goto err_chain_src_rewrite;
853 }
854
855 atomic64_inc(&esw->offloads.num_flows);
856
857 kfree(dest);
858 return rule;
859 err_chain_src_rewrite:
860 mlx5_esw_vporttbl_put(esw, &fwd_attr);
861 err_get_fwd:
862 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
863 err_get_fast:
864 kfree(dest);
865 return rule;
866 }
867
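/* Delete an offloaded rule and release everything it pinned: termination
 * tables, the per-vport and/or chain table references, and any
 * destination resources taken in esw_setup_dests().
 */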
868 static void
869 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
870 struct mlx5_flow_handle *rule,
871 struct mlx5_flow_attr *attr,
872 bool fwd_rule)
873 {
874 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
875 struct mlx5_fs_chains *chains = esw_chains(esw);
876 bool split = (esw_attr->split_count > 0);
877 struct mlx5_vport_tbl_attr fwd_attr;
878 int i;
879
880 mlx5_del_flow_rules(rule);
881
882 if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
883 /* unref the term table */
884 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
885 if (esw_attr->dests[i].termtbl)
886 mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
887 }
888 }
889
890 atomic64_dec(&esw->offloads.num_flows);
891
892 if (fwd_rule || split) {
893 fwd_attr.chain = attr->chain;
894 fwd_attr.prio = attr->prio;
895 fwd_attr.vport = esw_attr->in_rep->vport;
896 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
897 }
898
899 if (fwd_rule) {
900 mlx5_esw_vporttbl_put(esw, &fwd_attr);
901 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
902 } else {
903 if (split)
904 mlx5_esw_vporttbl_put(esw, &fwd_attr);
905 else if (attr->chain || attr->prio)
906 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
907 esw_cleanup_dests(esw, attr);
908 }
909 }
910
911 void
912 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
913 struct mlx5_flow_handle *rule,
914 struct mlx5_flow_attr *attr)
915 {
916 __mlx5_eswitch_del_rule(esw, rule, attr, false);
917 }
918
919 void
920 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
921 struct mlx5_flow_handle *rule,
922 struct mlx5_flow_attr *attr)
923 {
924 __mlx5_eswitch_del_rule(esw, rule, attr, true);
925 }
926
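/* Add a slow-path rule forwarding traffic sent from the given SQ by the
 * esw manager of @from_esw to @rep's vport on @on_esw; uplink reps with
 * an IPsec TX policy table are redirected through that table instead.
 * Exported for use outside the core driver (e.g. representor RDMA code).
 */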
927 struct mlx5_flow_handle *
928 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
929 struct mlx5_eswitch *from_esw,
930 struct mlx5_eswitch_rep *rep,
931 u32 sqn)
932 {
933 struct mlx5_flow_act flow_act = {0};
934 struct mlx5_flow_destination dest = {};
935 struct mlx5_flow_handle *flow_rule;
936 struct mlx5_flow_spec *spec;
937 void *misc;
938 u16 vport;
939
940 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
941 if (!spec) {
942 flow_rule = ERR_PTR(-ENOMEM);
943 goto out;
944 }
945
946 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
947 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
948
949 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
950 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
951
952 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
953
954 /* source vport is the esw manager */
955 vport = from_esw->manager_vport;
956
957 if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
958 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
959 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
960 mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));
961
962 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
963 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
964 mlx5_eswitch_get_vport_metadata_mask());
965
966 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
967 } else {
968 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
969 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
970
971 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
972 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
973 MLX5_CAP_GEN(from_esw->dev, vhca_id));
974
975 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
976 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
977
978 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
979 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
980 source_eswitch_owner_vhca_id);
981
982 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
983 }
984
985 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
986 dest.vport.num = rep->vport;
987 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
988 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
989 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
990
991 if (rep->vport == MLX5_VPORT_UPLINK &&
992 on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
993 dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
994 flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
995 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
996 } else {
997 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
998 dest.vport.num = rep->vport;
999 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
1000 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1001 }
1002
1003 if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
1004 rep->vport == MLX5_VPORT_UPLINK)
1005 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
1006
1007 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
1008 spec, &flow_act, &dest, 1);
1009 if (IS_ERR(flow_rule))
1010 esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
1011 PTR_ERR(flow_rule));
1012 out:
1013 kvfree(spec);
1014 return flow_rule;
1015 }
1016 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
1017
1018 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
1019 {
1020 mlx5_del_flow_rules(rule);
1021 }
1022
1023 void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
1024 {
1025 if (rule)
1026 mlx5_del_flow_rules(rule);
1027 }
1028
1029 struct mlx5_flow_handle *
1030 mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
1031 {
1032 struct mlx5_flow_destination dest = {};
1033 struct mlx5_flow_act flow_act = {0};
1034 struct mlx5_flow_handle *flow_rule;
1035 struct mlx5_flow_spec *spec;
1036
1037 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1038 if (!spec)
1039 return ERR_PTR(-ENOMEM);
1040
1041 MLX5_SET(fte_match_param, spec->match_criteria,
1042 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
1043 MLX5_SET(fte_match_param, spec->match_criteria,
1044 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
1045 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
1046 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
1047
1048 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1049 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1050 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1051
1052 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
1053 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
1054 dest.vport.num = vport_num;
1055
1056 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1057 spec, &flow_act, &dest, 1);
1058 if (IS_ERR(flow_rule))
1059 esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
1060 vport_num, PTR_ERR(flow_rule));
1061
1062 kvfree(spec);
1063 return flow_rule;
1064 }
1065
1066 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
1067 {
1068 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1069 MLX5_FDB_TO_VPORT_REG_C_1;
1070 }
1071
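/* Enable or disable copying source-port metadata from the FDB to the
 * vport context (reg_c_0, plus reg_c_1 when its loopback is supported)
 * via MODIFY_ESW_VPORT_CONTEXT, and track the reg_c_1 loopback state in
 * esw->flags.
 */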
1072 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
1073 {
1074 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
1075 u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
1076 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
1077 u8 curr, wanted;
1078 int err;
1079
1080 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
1081 !mlx5_eswitch_vport_match_metadata_enabled(esw))
1082 return 0;
1083
1084 MLX5_SET(query_esw_vport_context_in, in, opcode,
1085 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
1086 err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
1087 if (err)
1088 return err;
1089
1090 curr = MLX5_GET(query_esw_vport_context_out, out,
1091 esw_vport_context.fdb_to_vport_reg_c_id);
1092 wanted = MLX5_FDB_TO_VPORT_REG_C_0;
1093 if (mlx5_eswitch_reg_c1_loopback_supported(esw))
1094 wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
1095
1096 if (enable)
1097 curr |= wanted;
1098 else
1099 curr &= ~wanted;
1100
1101 MLX5_SET(modify_esw_vport_context_in, min,
1102 esw_vport_context.fdb_to_vport_reg_c_id, curr);
1103 MLX5_SET(modify_esw_vport_context_in, min,
1104 field_select.fdb_to_vport_reg_c_id, 1);
1105
1106 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
1107 if (!err) {
1108 if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
1109 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1110 else
1111 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1112 }
1113
1114 return err;
1115 }
1116
1117 static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
1118 struct mlx5_core_dev *peer_dev,
1119 struct mlx5_flow_spec *spec,
1120 struct mlx5_flow_destination *dest)
1121 {
1122 void *misc;
1123
1124 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1125 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1126 misc_parameters_2);
1127 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1128 mlx5_eswitch_get_vport_metadata_mask());
1129
1130 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1131 } else {
1132 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1133 misc_parameters);
1134
1135 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
1136 MLX5_CAP_GEN(peer_dev, vhca_id));
1137
1138 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1139
1140 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1141 misc_parameters);
1142 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1143 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
1144 source_eswitch_owner_vhca_id);
1145 }
1146
1147 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1148 dest->vport.num = peer_dev->priv.eswitch->manager_vport;
1149 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
1150 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
1151 }
1152
1153 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
1154 struct mlx5_eswitch *peer_esw,
1155 struct mlx5_flow_spec *spec,
1156 u16 vport)
1157 {
1158 void *misc;
1159
1160 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1161 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1162 misc_parameters_2);
1163 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1164 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
1165 vport));
1166 } else {
1167 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1168 misc_parameters);
1169 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1170 }
1171 }
1172
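/* Install peer miss rules: traffic arriving from a vport of the peer
 * eswitch (PF, ECPF, VFs and EC VFs) is forwarded to the peer's eswitch
 * manager vport. The rules are stored per peer device index so they can
 * be removed later.
 */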
1173 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1174 struct mlx5_core_dev *peer_dev)
1175 {
1176 struct mlx5_flow_destination dest = {};
1177 struct mlx5_flow_act flow_act = {0};
1178 struct mlx5_flow_handle **flows;
1179 /* the total number of vports is the same for both e-switches */
1180 int nvports = esw->total_vports;
1181 struct mlx5_flow_handle *flow;
1182 struct mlx5_flow_spec *spec;
1183 struct mlx5_vport *vport;
1184 int err, pfindex;
1185 unsigned long i;
1186 void *misc;
1187
1188 if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
1189 return 0;
1190
1191 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1192 if (!spec)
1193 return -ENOMEM;
1194
1195 peer_miss_rules_setup(esw, peer_dev, spec, &dest);
1196
1197 flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
1198 if (!flows) {
1199 err = -ENOMEM;
1200 goto alloc_flows_err;
1201 }
1202
1203 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1204 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1205 misc_parameters);
1206
1207 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1208 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1209 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1210 spec, MLX5_VPORT_PF);
1211
1212 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1213 spec, &flow_act, &dest, 1);
1214 if (IS_ERR(flow)) {
1215 err = PTR_ERR(flow);
1216 goto add_pf_flow_err;
1217 }
1218 flows[vport->index] = flow;
1219 }
1220
1221 if (mlx5_ecpf_vport_exists(esw->dev)) {
1222 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1223 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
1224 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1225 spec, &flow_act, &dest, 1);
1226 if (IS_ERR(flow)) {
1227 err = PTR_ERR(flow);
1228 goto add_ecpf_flow_err;
1229 }
1230 flows[vport->index] = flow;
1231 }
1232
1233 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1234 esw_set_peer_miss_rule_source_port(esw,
1235 peer_dev->priv.eswitch,
1236 spec, vport->vport);
1237
1238 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1239 spec, &flow_act, &dest, 1);
1240 if (IS_ERR(flow)) {
1241 err = PTR_ERR(flow);
1242 goto add_vf_flow_err;
1243 }
1244 flows[vport->index] = flow;
1245 }
1246
1247 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1248 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1249 if (i >= mlx5_core_max_ec_vfs(peer_dev))
1250 break;
1251 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1252 spec, vport->vport);
1253 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1254 spec, &flow_act, &dest, 1);
1255 if (IS_ERR(flow)) {
1256 err = PTR_ERR(flow);
1257 goto add_ec_vf_flow_err;
1258 }
1259 flows[vport->index] = flow;
1260 }
1261 }
1262
1263 pfindex = mlx5_get_dev_index(peer_dev);
1264 if (pfindex >= MLX5_MAX_PORTS) {
1265 esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
1266 pfindex, MLX5_MAX_PORTS);
1267 err = -EINVAL;
1268 goto add_ec_vf_flow_err;
1269 }
1270 esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
1271
1272 kvfree(spec);
1273 return 0;
1274
1275 add_ec_vf_flow_err:
1276 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1277 if (!flows[vport->index])
1278 continue;
1279 mlx5_del_flow_rules(flows[vport->index]);
1280 }
1281 add_vf_flow_err:
1282 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1283 if (!flows[vport->index])
1284 continue;
1285 mlx5_del_flow_rules(flows[vport->index]);
1286 }
1287 if (mlx5_ecpf_vport_exists(esw->dev)) {
1288 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1289 mlx5_del_flow_rules(flows[vport->index]);
1290 }
1291 add_ecpf_flow_err:
1292 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1293 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1294 mlx5_del_flow_rules(flows[vport->index]);
1295 }
1296 add_pf_flow_err:
1297 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
1298 kvfree(flows);
1299 alloc_flows_err:
1300 kvfree(spec);
1301 return err;
1302 }
1303
1304 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1305 struct mlx5_core_dev *peer_dev)
1306 {
1307 u16 peer_index = mlx5_get_dev_index(peer_dev);
1308 struct mlx5_flow_handle **flows;
1309 struct mlx5_vport *vport;
1310 unsigned long i;
1311
1312 flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
1313 if (!flows)
1314 return;
1315
1316 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1317 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1318 /* The flow for a particular vport could be NULL if the other ECPF
1319 * has fewer or no VFs enabled
1320 */
1321 if (!flows[vport->index])
1322 continue;
1323 mlx5_del_flow_rules(flows[vport->index]);
1324 }
1325 }
1326
1327 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
1328 mlx5_del_flow_rules(flows[vport->index]);
1329
1330 if (mlx5_ecpf_vport_exists(esw->dev)) {
1331 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1332 mlx5_del_flow_rules(flows[vport->index]);
1333 }
1334
1335 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1336 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1337 mlx5_del_flow_rules(flows[vport->index]);
1338 }
1339
1340 kvfree(flows);
1341 esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
1342 }
1343
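/* Install the two match-all miss rules (unicast and multicast dst mac)
 * that forward unmatched FDB traffic to the eswitch manager vport.
 */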
1344 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
1345 {
1346 struct mlx5_flow_act flow_act = {0};
1347 struct mlx5_flow_destination dest = {};
1348 struct mlx5_flow_handle *flow_rule = NULL;
1349 struct mlx5_flow_spec *spec;
1350 void *headers_c;
1351 void *headers_v;
1352 int err = 0;
1353 u8 *dmac_c;
1354 u8 *dmac_v;
1355
1356 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1357 if (!spec) {
1358 err = -ENOMEM;
1359 goto out;
1360 }
1361
1362 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1363 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1364 outer_headers);
1365 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
1366 outer_headers.dmac_47_16);
1367 dmac_c[0] = 0x01;
1368
1369 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
1370 dest.vport.num = esw->manager_vport;
1371 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1372
1373 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1374 spec, &flow_act, &dest, 1);
1375 if (IS_ERR(flow_rule)) {
1376 err = PTR_ERR(flow_rule);
1377 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
1378 goto out;
1379 }
1380
1381 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
1382
1383 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1384 outer_headers);
1385 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
1386 outer_headers.dmac_47_16);
1387 dmac_v[0] = 0x01;
1388 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1389 spec, &flow_act, &dest, 1);
1390 if (IS_ERR(flow_rule)) {
1391 err = PTR_ERR(flow_rule);
1392 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
1393 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1394 goto out;
1395 }
1396
1397 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
1398
1399 out:
1400 kvfree(spec);
1401 return err;
1402 }
1403
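/* Add a rule to the restore table matching @tag in the user-data bits of
 * reg_c_0: it restores the tag as the flow tag, applies the restore
 * copy modify-header and forwards to the offloads table.
 */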
1404 struct mlx5_flow_handle *
1405 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
1406 {
1407 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
1408 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
1409 struct mlx5_flow_context *flow_context;
1410 struct mlx5_flow_handle *flow_rule;
1411 struct mlx5_flow_destination dest;
1412 struct mlx5_flow_spec *spec;
1413 void *misc;
1414
1415 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1416 return ERR_PTR(-EOPNOTSUPP);
1417
1418 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1419 if (!spec)
1420 return ERR_PTR(-ENOMEM);
1421
1422 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1423 misc_parameters_2);
1424 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1425 ESW_REG_C0_USER_DATA_METADATA_MASK);
1426 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1427 misc_parameters_2);
1428 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
1429 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1430 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1431 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1432 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
1433
1434 flow_context = &spec->flow_context;
1435 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
1436 flow_context->flow_tag = tag;
1437 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1438 dest.ft = esw->offloads.ft_offloads;
1439
1440 flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1441 kvfree(spec);
1442
1443 if (IS_ERR(flow_rule))
1444 esw_warn(esw->dev,
1445 "Failed to create restore rule for tag: %d, err(%d)\n",
1446 tag, (int)PTR_ERR(flow_rule));
1447
1448 return flow_rule;
1449 }
1450
1451 #define MAX_PF_SQ 256
1452 #define MAX_SQ_NVPORTS 32
1453
1454 void
1455 mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1456 u32 *flow_group_in,
1457 int match_params)
1458 {
1459 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1460 flow_group_in,
1461 match_criteria);
1462
1463 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1464 MLX5_SET(create_flow_group_in, flow_group_in,
1465 match_criteria_enable,
1466 MLX5_MATCH_MISC_PARAMETERS_2 | match_params);
1467
1468 MLX5_SET(fte_match_param, match_criteria,
1469 misc_parameters_2.metadata_reg_c_0,
1470 mlx5_eswitch_get_vport_metadata_mask());
1471 } else {
1472 MLX5_SET(create_flow_group_in, flow_group_in,
1473 match_criteria_enable,
1474 MLX5_MATCH_MISC_PARAMETERS | match_params);
1475
1476 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1477 misc_parameters.source_port);
1478 }
1479 }
1480
1481 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
1482 static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
1483 {
1484 struct mlx5_vport_tbl_attr attr;
1485 struct mlx5_vport *vport;
1486 unsigned long i;
1487
1488 attr.chain = 0;
1489 attr.prio = 1;
1490 mlx5_esw_for_each_vport(esw, i, vport) {
1491 attr.vport = vport->vport;
1492 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
1493 mlx5_esw_vporttbl_put(esw, &attr);
1494 }
1495 }
1496
1497 static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
1498 {
1499 struct mlx5_vport_tbl_attr attr;
1500 struct mlx5_flow_table *fdb;
1501 struct mlx5_vport *vport;
1502 unsigned long i;
1503
1504 attr.chain = 0;
1505 attr.prio = 1;
1506 mlx5_esw_for_each_vport(esw, i, vport) {
1507 attr.vport = vport->vport;
1508 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
1509 fdb = mlx5_esw_vporttbl_get(esw, &attr);
1510 if (IS_ERR(fdb))
1511 goto out;
1512 }
1513 return 0;
1514
1515 out:
1516 esw_vport_tbl_put(esw);
1517 return PTR_ERR(fdb);
1518 }
1519
1520 #define fdb_modify_header_fwd_to_table_supported(esw) \
1521 (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
1522 static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
1523 {
1524 struct mlx5_core_dev *dev = esw->dev;
1525
1526 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
1527 *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
1528
1529 if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
1530 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1531 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1532 esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
1533 } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
1534 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1535 esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
1536 } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
1537 /* Disabled when the TTL workaround is needed, e.g.
1538 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
1539 */
1540 esw_warn(dev,
1541 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
1542 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1543 } else {
1544 *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
1545 esw_info(dev, "Supported tc chains and prios offload\n");
1546 }
1547
1548 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1549 *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
1550 }
1551
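/* Create the FDB chains infrastructure: the chains object itself, the
 * always-present tc_end_ft, the chain 0 fast-path table and, when
 * chains/prios are not supported, the per-vport tables for split rules.
 */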
1552 static int
1553 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1554 {
1555 struct mlx5_core_dev *dev = esw->dev;
1556 struct mlx5_flow_table *nf_ft, *ft;
1557 struct mlx5_chains_attr attr = {};
1558 struct mlx5_fs_chains *chains;
1559 int err;
1560
1561 esw_init_chains_offload_flags(esw, &attr.flags);
1562 attr.ns = MLX5_FLOW_NAMESPACE_FDB;
1563 attr.max_grp_num = esw->params.large_group_num;
1564 attr.default_ft = miss_fdb;
1565 attr.mapping = esw->offloads.reg_c0_obj_pool;
1566
1567 chains = mlx5_chains_create(dev, &attr);
1568 if (IS_ERR(chains)) {
1569 err = PTR_ERR(chains);
1570 esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
1571 return err;
1572 }
1573 mlx5_chains_print_info(chains);
1574
1575 esw->fdb_table.offloads.esw_chains_priv = chains;
1576
1577 /* Create tc_end_ft, which is the ft chain that is always created */
1578 nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
1579 1, 0);
1580 if (IS_ERR(nf_ft)) {
1581 err = PTR_ERR(nf_ft);
1582 goto nf_ft_err;
1583 }
1584
1585 /* Always open the root for fast path */
1586 ft = mlx5_chains_get_table(chains, 0, 1, 0);
1587 if (IS_ERR(ft)) {
1588 err = PTR_ERR(ft);
1589 goto level_0_err;
1590 }
1591
1592 /* Open level 1 for split fdb rules now if prios aren't supported */
1593 if (!mlx5_chains_prios_supported(chains)) {
1594 err = esw_vport_tbl_get(esw);
1595 if (err)
1596 goto level_1_err;
1597 }
1598
1599 mlx5_chains_set_end_ft(chains, nf_ft);
1600
1601 return 0;
1602
1603 level_1_err:
1604 mlx5_chains_put_table(chains, 0, 1, 0);
1605 level_0_err:
1606 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1607 nf_ft_err:
1608 mlx5_chains_destroy(chains);
1609 esw->fdb_table.offloads.esw_chains_priv = NULL;
1610
1611 return err;
1612 }
1613
1614 static void
1615 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1616 {
1617 if (!mlx5_chains_prios_supported(chains))
1618 esw_vport_tbl_put(esw);
1619 mlx5_chains_put_table(chains, 0, 1, 0);
1620 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
1621 mlx5_chains_destroy(chains);
1622 }
1623
1624 #else /* CONFIG_MLX5_CLS_ACT */
1625
1626 static int
1627 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1628 { return 0; }
1629
1630 static void
1631 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1632 {}
1633
1634 #endif
1635
1636 static int
1637 esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
1638 struct mlx5_flow_table *fdb,
1639 u32 *flow_group_in,
1640 int *ix)
1641 {
1642 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1643 struct mlx5_flow_group *g;
1644 void *match_criteria;
1645 int count, err = 0;
1646
1647 memset(flow_group_in, 0, inlen);
1648
1649 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
1650
1651 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1652 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1653
1654 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1655 MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
1656 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1657 misc_parameters.source_eswitch_owner_vhca_id);
1658 MLX5_SET(create_flow_group_in, flow_group_in,
1659 source_eswitch_owner_vhca_id_valid, 1);
1660 }
1661
1662 /* See comment at table_size calculation */
1663 count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
1664 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1665 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
1666 *ix += count;
1667
1668 g = mlx5_create_flow_group(fdb, flow_group_in);
1669 if (IS_ERR(g)) {
1670 err = PTR_ERR(g);
1671 esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1672 goto out;
1673 }
1674 esw->fdb_table.offloads.send_to_vport_grp = g;
1675
1676 out:
1677 return err;
1678 }
1679
1680 static int
1681 esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
1682 struct mlx5_flow_table *fdb,
1683 u32 *flow_group_in,
1684 int *ix)
1685 {
1686 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1687 struct mlx5_flow_group *g;
1688 void *match_criteria;
1689 int err = 0;
1690
1691 if (!esw_src_port_rewrite_supported(esw))
1692 return 0;
1693
1694 memset(flow_group_in, 0, inlen);
1695
1696 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1697 MLX5_MATCH_MISC_PARAMETERS_2);
1698
1699 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1700
1701 MLX5_SET(fte_match_param, match_criteria,
1702 misc_parameters_2.metadata_reg_c_0,
1703 mlx5_eswitch_get_vport_metadata_mask());
1704 MLX5_SET(fte_match_param, match_criteria,
1705 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
1706
1707 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
1708 MLX5_SET(create_flow_group_in, flow_group_in,
1709 end_flow_index, *ix + esw->total_vports - 1);
1710 *ix += esw->total_vports;
1711
1712 g = mlx5_create_flow_group(fdb, flow_group_in);
1713 if (IS_ERR(g)) {
1714 err = PTR_ERR(g);
1715 esw_warn(esw->dev,
1716 "Failed to create send-to-vport meta flow group err(%d)\n", err);
1717 goto send_vport_meta_err;
1718 }
1719 esw->fdb_table.offloads.send_to_vport_meta_grp = g;
1720
1721 return 0;
1722
1723 send_vport_meta_err:
1724 return err;
1725 }
1726
1727 static int
1728 esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
1729 struct mlx5_flow_table *fdb,
1730 u32 *flow_group_in,
1731 int *ix)
1732 {
1733 int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
1734 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1735 struct mlx5_flow_group *g;
1736 void *match_criteria;
1737 int err = 0;
1738
1739 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1740 return 0;
1741
1742 memset(flow_group_in, 0, inlen);
1743
1744 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
1745
1746 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1747 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1748 flow_group_in,
1749 match_criteria);
1750
1751 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1752 misc_parameters.source_eswitch_owner_vhca_id);
1753
1754 MLX5_SET(create_flow_group_in, flow_group_in,
1755 source_eswitch_owner_vhca_id_valid, 1);
1756 }
1757
1758 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
1759 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1760 *ix + max_peer_ports);
1761 *ix += max_peer_ports + 1;
1762
1763 g = mlx5_create_flow_group(fdb, flow_group_in);
1764 if (IS_ERR(g)) {
1765 err = PTR_ERR(g);
1766 esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
1767 goto out;
1768 }
1769 esw->fdb_table.offloads.peer_miss_grp = g;
1770
1771 out:
1772 return err;
1773 }
1774
1775 static int
1776 esw_create_miss_group(struct mlx5_eswitch *esw,
1777 struct mlx5_flow_table *fdb,
1778 u32 *flow_group_in,
1779 int *ix)
1780 {
1781 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1782 struct mlx5_flow_group *g;
1783 void *match_criteria;
1784 int err = 0;
1785 u8 *dmac;
1786
1787 memset(flow_group_in, 0, inlen);
1788
1789 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1790 MLX5_MATCH_OUTER_HEADERS);
1791 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1792 match_criteria);
1793 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1794 outer_headers.dmac_47_16);
1795 dmac[0] = 0x01;
1796
1797 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
1798 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1799 *ix + MLX5_ESW_MISS_FLOWS);
1800
1801 g = mlx5_create_flow_group(fdb, flow_group_in);
1802 if (IS_ERR(g)) {
1803 err = PTR_ERR(g);
1804 esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
1805 goto miss_err;
1806 }
1807 esw->fdb_table.offloads.miss_grp = g;
1808
1809 err = esw_add_fdb_miss_rule(esw);
1810 if (err)
1811 goto miss_rule_err;
1812
1813 return 0;
1814
1815 miss_rule_err:
1816 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1817 miss_err:
1818 return err;
1819 }
1820
1821 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
1822 {
1823 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1824 struct mlx5_flow_table_attr ft_attr = {};
1825 struct mlx5_core_dev *dev = esw->dev;
1826 struct mlx5_flow_namespace *root_ns;
1827 struct mlx5_flow_table *fdb = NULL;
1828 int table_size, ix = 0, err = 0;
1829 u32 flags = 0, *flow_group_in;
1830
1831 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1832
1833 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1834 if (!flow_group_in)
1835 return -ENOMEM;
1836
1837 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1838 if (!root_ns) {
1839 esw_warn(dev, "Failed to get FDB flow namespace\n");
1840 err = -EOPNOTSUPP;
1841 goto ns_err;
1842 }
1843 esw->fdb_table.offloads.ns = root_ns;
1844 err = mlx5_flow_namespace_set_mode(root_ns,
1845 esw->dev->priv.steering->mode);
1846 if (err) {
1847 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1848 goto ns_err;
1849 }
1850
1851 /* To be strictly correct:
1852 * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
1853 * should be:
1854 * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1855 * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
1856 * but as the peer device might not be in switchdev mode it's not
1857 * possible. We use the fact that by default FW sets max vfs and max sfs
1858 * to the same value on both devices. If this needs to be changed in the
1859 * future, note that the peer miss group should also be created based on the
1860 * number of total vports of the peer (currently it also uses esw->total_vports).
1861 */
1862 table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
1863 esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
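	/* Worked example of the sizing above (values are illustrative only and
	 * assume MLX5_MAX_PORTS = 2, esw->total_vports = 8, MAX_SQ_NVPORTS = 32,
	 * MAX_PF_SQ = 256): the send-to-vport space is 2 * (8 * 32 + 256) = 1024
	 * entries, the peer miss space adds 8 * 2 = 16 entries, and the two
	 * MLX5_ESW_MISS_FLOWS entries bring table_size to 1042.
	 */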
1864
1865 /* create the slow path fdb with encap set, so further table instances
1866 * can be created at run time while VFs are probed if the FW allows that.
1867 */
1868 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1869 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1870 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1871
1872 ft_attr.flags = flags;
1873 ft_attr.max_fte = table_size;
1874 ft_attr.prio = FDB_SLOW_PATH;
1875
1876 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1877 if (IS_ERR(fdb)) {
1878 err = PTR_ERR(fdb);
1879 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1880 goto slow_fdb_err;
1881 }
1882 esw->fdb_table.offloads.slow_fdb = fdb;
1883
1884 /* Create empty TC-miss managed table. This allows plugging in following
1885 * priorities without directly exposing their level 0 table to
1886 * eswitch_offloads and passing it as miss_fdb to following call to
1887 * esw_chains_create().
1888 */
1889 memset(&ft_attr, 0, sizeof(ft_attr));
1890 ft_attr.prio = FDB_TC_MISS;
1891 esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
1892 if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
1893 err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
1894 esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
1895 goto tc_miss_table_err;
1896 }
1897
1898 err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
1899 if (err) {
1900 esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
1901 goto fdb_chains_err;
1902 }
1903
1904 err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1905 if (err)
1906 goto send_vport_err;
1907
1908 err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1909 if (err)
1910 goto send_vport_meta_err;
1911
1912 err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
1913 if (err)
1914 goto peer_miss_err;
1915
1916 err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
1917 if (err)
1918 goto miss_err;
1919
1920 kvfree(flow_group_in);
1921 return 0;
1922
1923 miss_err:
1924 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1925 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1926 peer_miss_err:
1927 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1928 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1929 send_vport_meta_err:
1930 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1931 send_vport_err:
1932 esw_chains_destroy(esw, esw_chains(esw));
1933 fdb_chains_err:
1934 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1935 tc_miss_table_err:
1936 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1937 slow_fdb_err:
1938 /* Holds true only as long as DMFS is the default */
1939 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1940 ns_err:
1941 kvfree(flow_group_in);
1942 return err;
1943 }
1944
1945 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1946 {
1947 if (!mlx5_eswitch_get_slow_fdb(esw))
1948 return;
1949
1950 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1951 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1952 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1953 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1954 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1955 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1956 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1957 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1958 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1959
1960 esw_chains_destroy(esw, esw_chains(esw));
1961
1962 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1963 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1964 /* Holds true only as long as DMFS is the default */
1965 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1966 MLX5_FLOW_STEERING_MODE_DMFS);
1967 atomic64_set(&esw->user_count, 0);
1968 }
1969
1970 static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
1971 {
1972 int nvports;
1973
1974 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
1975 if (mlx5e_tc_int_port_supported(esw))
1976 nvports += MLX5E_TC_MAX_INT_PORT_NUM;
1977
1978 return nvports;
1979 }
1980
1981 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1982 {
1983 struct mlx5_flow_table_attr ft_attr = {};
1984 struct mlx5_core_dev *dev = esw->dev;
1985 struct mlx5_flow_table *ft_offloads;
1986 struct mlx5_flow_namespace *ns;
1987 int err = 0;
1988
1989 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1990 if (!ns) {
1991 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1992 return -EOPNOTSUPP;
1993 }
1994
1995 ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
1996 MLX5_ESW_FT_OFFLOADS_DROP_RULE;
1997 ft_attr.prio = 1;
1998
1999 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
2000 if (IS_ERR(ft_offloads)) {
2001 err = PTR_ERR(ft_offloads);
2002 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
2003 return err;
2004 }
2005
2006 esw->offloads.ft_offloads = ft_offloads;
2007 return 0;
2008 }
2009
2010 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
2011 {
2012 struct mlx5_esw_offload *offloads = &esw->offloads;
2013
2014 mlx5_destroy_flow_table(offloads->ft_offloads);
2015 }
2016
2017 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
2018 {
2019 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2020 struct mlx5_flow_group *g;
2021 u32 *flow_group_in;
2022 int nvports;
2023 int err = 0;
2024
2025 nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
2026 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2027 if (!flow_group_in)
2028 return -ENOMEM;
2029
2030 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
2031
2032 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2033 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
2034
2035 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2036
2037 if (IS_ERR(g)) {
2038 err = PTR_ERR(g);
2039 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
2040 goto out;
2041 }
2042
2043 esw->offloads.vport_rx_group = g;
2044 out:
2045 kvfree(flow_group_in);
2046 return err;
2047 }
2048
2049 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
2050 {
2051 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
2052 }
2053
2054 static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
2055 {
2056 /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
2057 * for the drop rule, which is placed at the end of the table.
2058 * So return the total number of vport and int_port entries as the drop rule index.
2059 */
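	/* For illustration (numbers assumed): with 10 steering source ports the
	 * rx rules occupy flow indices 0..9 and the drop rule lands at index 10,
	 * i.e. the value returned here.
	 */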
2060 return esw_get_nr_ft_offloads_steering_src_ports(esw);
2061 }
2062
2063 static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
2064 {
2065 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2066 struct mlx5_flow_group *g;
2067 u32 *flow_group_in;
2068 int flow_index;
2069 int err = 0;
2070
2071 flow_index = esw_create_vport_rx_drop_rule_index(esw);
2072
2073 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2074 if (!flow_group_in)
2075 return -ENOMEM;
2076
2077 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
2078 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
2079
2080 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2081
2082 if (IS_ERR(g)) {
2083 err = PTR_ERR(g);
2084 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
2085 goto out;
2086 }
2087
2088 esw->offloads.vport_rx_drop_group = g;
2089 out:
2090 kvfree(flow_group_in);
2091 return err;
2092 }
2093
2094 static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
2095 {
2096 if (esw->offloads.vport_rx_drop_group)
2097 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
2098 }
2099
2100 void
2101 mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
2102 u16 vport,
2103 struct mlx5_flow_spec *spec)
2104 {
2105 void *misc;
2106
2107 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2108 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
2109 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2110 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
2111
2112 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
2113 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2114 mlx5_eswitch_get_vport_metadata_mask());
2115
2116 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
2117 } else {
2118 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2119 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
2120
2121 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2122 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2123
2124 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2125 }
2126 }
2127
2128 struct mlx5_flow_handle *
2129 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
2130 struct mlx5_flow_destination *dest)
2131 {
2132 struct mlx5_flow_act flow_act = {0};
2133 struct mlx5_flow_handle *flow_rule;
2134 struct mlx5_flow_spec *spec;
2135
2136 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2137 if (!spec) {
2138 flow_rule = ERR_PTR(-ENOMEM);
2139 goto out;
2140 }
2141
2142 mlx5_esw_set_spec_source_port(esw, vport, spec);
2143
2144 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2145 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
2146 &flow_act, dest, 1);
2147 if (IS_ERR(flow_rule)) {
2148 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
2149 goto out;
2150 }
2151
2152 out:
2153 kvfree(spec);
2154 return flow_rule;
2155 }
2156
2157 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2158 {
2159 struct mlx5_flow_act flow_act = {};
2160 struct mlx5_flow_handle *flow_rule;
2161
2162 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2163 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
2164 &flow_act, NULL, 0);
2165 if (IS_ERR(flow_rule)) {
2166 esw_warn(esw->dev,
2167 "fs offloads: Failed to add vport rx drop rule err %ld\n",
2168 PTR_ERR(flow_rule));
2169 return PTR_ERR(flow_rule);
2170 }
2171
2172 esw->offloads.vport_rx_drop_rule = flow_rule;
2173
2174 return 0;
2175 }
2176
2177 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2178 {
2179 if (esw->offloads.vport_rx_drop_rule)
2180 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
2181 }
2182
2183 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
2184 {
2185 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
2186 struct mlx5_core_dev *dev = esw->dev;
2187 struct mlx5_vport *vport;
2188 unsigned long i;
2189
2190 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2191 return -EOPNOTSUPP;
2192
2193 if (!mlx5_esw_is_fdb_created(esw))
2194 return -EOPNOTSUPP;
2195
2196 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2197 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2198 mlx5_mode = MLX5_INLINE_MODE_NONE;
2199 goto out;
2200 case MLX5_CAP_INLINE_MODE_L2:
2201 mlx5_mode = MLX5_INLINE_MODE_L2;
2202 goto out;
2203 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2204 goto query_vports;
2205 }
2206
2207 query_vports:
2208 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2209 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
2210 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
2211 if (prev_mlx5_mode != mlx5_mode)
2212 return -EINVAL;
2213 prev_mlx5_mode = mlx5_mode;
2214 }
2215
2216 out:
2217 *mode = mlx5_mode;
2218 return 0;
2219 }
2220
2221 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2222 {
2223 struct mlx5_esw_offload *offloads = &esw->offloads;
2224
2225 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2226 return;
2227
2228 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2229 mlx5_destroy_flow_group(offloads->restore_group);
2230 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
2231 }
2232
2233 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2234 {
2235 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2236 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2237 struct mlx5_flow_table_attr ft_attr = {};
2238 struct mlx5_core_dev *dev = esw->dev;
2239 struct mlx5_flow_namespace *ns;
2240 struct mlx5_modify_hdr *mod_hdr;
2241 void *match_criteria, *misc;
2242 struct mlx5_flow_table *ft;
2243 struct mlx5_flow_group *g;
2244 u32 *flow_group_in;
2245 int err = 0;
2246
2247 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2248 return 0;
2249
2250 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2251 if (!ns) {
2252 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2253 return -EOPNOTSUPP;
2254 }
2255
2256 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2257 if (!flow_group_in) {
2258 err = -ENOMEM;
2259 goto out_free;
2260 }
2261
2262 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2263 ft = mlx5_create_flow_table(ns, &ft_attr);
2264 if (IS_ERR(ft)) {
2265 err = PTR_ERR(ft);
2266 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2267 err);
2268 goto out_free;
2269 }
2270
2271 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2272 match_criteria);
2273 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2274 misc_parameters_2);
2275
2276 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2277 ESW_REG_C0_USER_DATA_METADATA_MASK);
2278 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2279 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2280 ft_attr.max_fte - 1);
2281 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2282 MLX5_MATCH_MISC_PARAMETERS_2);
2283 g = mlx5_create_flow_group(ft, flow_group_in);
2284 if (IS_ERR(g)) {
2285 err = PTR_ERR(g);
2286 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2287 err);
2288 goto err_group;
2289 }
2290
2291 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2292 MLX5_SET(copy_action_in, modact, src_field,
2293 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2294 MLX5_SET(copy_action_in, modact, dst_field,
2295 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2296 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2297 MLX5_FLOW_NAMESPACE_KERNEL, 1,
2298 modact);
2299 if (IS_ERR(mod_hdr)) {
2300 err = PTR_ERR(mod_hdr);
2301 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2302 err);
2303 goto err_mod_hdr;
2304 }
2305
2306 esw->offloads.ft_offloads_restore = ft;
2307 esw->offloads.restore_group = g;
2308 esw->offloads.restore_copy_hdr_id = mod_hdr;
2309
2310 kvfree(flow_group_in);
2311
2312 return 0;
2313
2314 err_mod_hdr:
2315 mlx5_destroy_flow_group(g);
2316 err_group:
2317 mlx5_destroy_flow_table(ft);
2318 out_free:
2319 kvfree(flow_group_in);
2320
2321 return err;
2322 }
2323
2324 static int esw_offloads_start(struct mlx5_eswitch *esw,
2325 struct netlink_ext_ack *extack)
2326 {
2327 int err;
2328
2329 esw->mode = MLX5_ESWITCH_OFFLOADS;
2330 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2331 if (err) {
2332 NL_SET_ERR_MSG_MOD(extack,
2333 "Failed setting eswitch to offloads");
2334 esw->mode = MLX5_ESWITCH_LEGACY;
2335 mlx5_rescan_drivers(esw->dev);
2336 return err;
2337 }
2338 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2339 if (mlx5_eswitch_inline_mode_get(esw,
2340 &esw->offloads.inline_mode)) {
2341 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2342 NL_SET_ERR_MSG_MOD(extack,
2343 "Inline mode is different between vports");
2344 }
2345 }
2346 return 0;
2347 }
2348
2349 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2350 {
2351 struct mlx5_eswitch_rep *rep;
2352 int rep_type;
2353 int err;
2354
2355 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2356 if (!rep)
2357 return -ENOMEM;
2358
2359 rep->vport = vport->vport;
2360 rep->vport_index = vport->index;
2361 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2362 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2363
2364 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2365 if (err)
2366 goto insert_err;
2367
2368 return 0;
2369
2370 insert_err:
2371 kfree(rep);
2372 return err;
2373 }
2374
2375 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2376 struct mlx5_eswitch_rep *rep)
2377 {
2378 xa_erase(&esw->offloads.vport_reps, rep->vport);
2379 kfree(rep);
2380 }
2381
2382 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2383 {
2384 struct mlx5_eswitch_rep *rep;
2385 unsigned long i;
2386
2387 mlx5_esw_for_each_rep(esw, i, rep)
2388 mlx5_esw_offloads_rep_cleanup(esw, rep);
2389 xa_destroy(&esw->offloads.vport_reps);
2390 }
2391
2392 static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2393 {
2394 struct mlx5_vport *vport;
2395 unsigned long i;
2396 int err;
2397
2398 xa_init(&esw->offloads.vport_reps);
2399
2400 mlx5_esw_for_each_vport(esw, i, vport) {
2401 err = mlx5_esw_offloads_rep_init(esw, vport);
2402 if (err)
2403 goto err;
2404 }
2405 return 0;
2406
2407 err:
2408 esw_offloads_cleanup_reps(esw);
2409 return err;
2410 }
2411
2412 static int esw_port_metadata_set(struct devlink *devlink, u32 id,
2413 struct devlink_param_gset_ctx *ctx)
2414 {
2415 struct mlx5_core_dev *dev = devlink_priv(devlink);
2416 struct mlx5_eswitch *esw = dev->priv.eswitch;
2417 int err = 0;
2418
2419 down_write(&esw->mode_lock);
2420 if (mlx5_esw_is_fdb_created(esw)) {
2421 err = -EBUSY;
2422 goto done;
2423 }
2424 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2425 err = -EOPNOTSUPP;
2426 goto done;
2427 }
2428 if (ctx->val.vbool)
2429 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2430 else
2431 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2432 done:
2433 up_write(&esw->mode_lock);
2434 return err;
2435 }
2436
2437 static int esw_port_metadata_get(struct devlink *devlink, u32 id,
2438 struct devlink_param_gset_ctx *ctx)
2439 {
2440 struct mlx5_core_dev *dev = devlink_priv(devlink);
2441
2442 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
2443 return 0;
2444 }
2445
2446 static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
2447 union devlink_param_value val,
2448 struct netlink_ext_ack *extack)
2449 {
2450 struct mlx5_core_dev *dev = devlink_priv(devlink);
2451 u8 esw_mode;
2452
2453 esw_mode = mlx5_eswitch_mode(dev);
2454 if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
2455 NL_SET_ERR_MSG_MOD(extack,
2456 "E-Switch must either disabled or non switchdev mode");
2457 return -EBUSY;
2458 }
2459 return 0;
2460 }
2461
2462 static const struct devlink_param esw_devlink_params[] = {
2463 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
2464 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
2465 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
2466 esw_port_metadata_get,
2467 esw_port_metadata_set,
2468 esw_port_metadata_validate),
2469 };
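/* Usage sketch (device address assumed, shown for illustration only): the
 * runtime parameter registered above can be toggled from userspace with the
 * devlink tool, e.g.:
 *
 *   devlink dev param set pci/0000:08:00.0 name esw_port_metadata \
 *           value false cmode runtime
 *
 * The validate callback above rejects the change while the E-Switch is in
 * switchdev mode.
 */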
2470
2471 int esw_offloads_init(struct mlx5_eswitch *esw)
2472 {
2473 int err;
2474
2475 err = esw_offloads_init_reps(esw);
2476 if (err)
2477 return err;
2478
2479 if (MLX5_ESWITCH_MANAGER(esw->dev) &&
2480 mlx5_esw_vport_match_metadata_supported(esw))
2481 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2482
2483 err = devl_params_register(priv_to_devlink(esw->dev),
2484 esw_devlink_params,
2485 ARRAY_SIZE(esw_devlink_params));
2486 if (err)
2487 goto err_params;
2488
2489 return 0;
2490
2491 err_params:
2492 esw_offloads_cleanup_reps(esw);
2493 return err;
2494 }
2495
2496 void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2497 {
2498 devl_params_unregister(priv_to_devlink(esw->dev),
2499 esw_devlink_params,
2500 ARRAY_SIZE(esw_devlink_params));
2501 esw_offloads_cleanup_reps(esw);
2502 }
2503
2504 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
2505 struct mlx5_eswitch_rep *rep, u8 rep_type)
2506 {
2507 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2508 REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
2509 return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2510
2511 return 0;
2512 }
2513
2514 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2515 struct mlx5_eswitch_rep *rep, u8 rep_type)
2516 {
2517 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2518 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2519 esw->offloads.rep_ops[rep_type]->unload(rep);
2520 }
2521
2522 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2523 {
2524 struct mlx5_eswitch_rep *rep;
2525 unsigned long i;
2526
2527 mlx5_esw_for_each_rep(esw, i, rep)
2528 __esw_offloads_unload_rep(esw, rep, rep_type);
2529 }
2530
2531 static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2532 {
2533 struct mlx5_eswitch_rep *rep;
2534 int rep_type;
2535 int err;
2536
2537 rep = mlx5_eswitch_get_rep(esw, vport_num);
2538 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2539 err = __esw_offloads_load_rep(esw, rep, rep_type);
2540 if (err)
2541 goto err_reps;
2542 }
2543
2544 return 0;
2545
2546 err_reps:
2547 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2548 for (--rep_type; rep_type >= 0; rep_type--)
2549 __esw_offloads_unload_rep(esw, rep, rep_type);
2550 return err;
2551 }
2552
2553 static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2554 {
2555 struct mlx5_eswitch_rep *rep;
2556 int rep_type;
2557
2558 rep = mlx5_eswitch_get_rep(esw, vport_num);
2559 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2560 __esw_offloads_unload_rep(esw, rep, rep_type);
2561 }
2562
2563 int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2564 {
2565 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2566 return 0;
2567
2568 return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
2569 }
2570
2571 void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2572 {
2573 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2574 return;
2575
2576 mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
2577 }
2578
2579 int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
2580 struct mlx5_devlink_port *dl_port,
2581 u32 controller, u32 sfnum)
2582 {
2583 return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
2584 }
2585
2586 void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2587 {
2588 mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
2589 }
2590
2591 int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2592 {
2593 int err;
2594
2595 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2596 return 0;
2597
2598 err = mlx5_esw_offloads_devlink_port_register(esw, vport);
2599 if (err)
2600 return err;
2601
2602 err = mlx5_esw_offloads_rep_load(esw, vport->vport);
2603 if (err)
2604 goto load_err;
2605 return err;
2606
2607 load_err:
2608 mlx5_esw_offloads_devlink_port_unregister(esw, vport);
2609 return err;
2610 }
2611
2612 void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2613 {
2614 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2615 return;
2616
2617 mlx5_esw_offloads_rep_unload(esw, vport->vport);
2618
2619 mlx5_esw_offloads_devlink_port_unregister(esw, vport);
2620 }
2621
2622 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2623 struct mlx5_core_dev *slave)
2624 {
2625 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2626 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2627 struct mlx5_flow_root_namespace *root;
2628 struct mlx5_flow_namespace *ns;
2629 int err;
2630
2631 MLX5_SET(set_flow_table_root_in, in, opcode,
2632 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2633 MLX5_SET(set_flow_table_root_in, in, table_type,
2634 FS_FT_FDB);
2635
2636 if (master) {
2637 ns = mlx5_get_flow_namespace(master,
2638 MLX5_FLOW_NAMESPACE_FDB);
2639 root = find_root(&ns->node);
2640 mutex_lock(&root->chain_lock);
2641 MLX5_SET(set_flow_table_root_in, in,
2642 table_eswitch_owner_vhca_id_valid, 1);
2643 MLX5_SET(set_flow_table_root_in, in,
2644 table_eswitch_owner_vhca_id,
2645 MLX5_CAP_GEN(master, vhca_id));
2646 MLX5_SET(set_flow_table_root_in, in, table_id,
2647 root->root_ft->id);
2648 } else {
2649 ns = mlx5_get_flow_namespace(slave,
2650 MLX5_FLOW_NAMESPACE_FDB);
2651 root = find_root(&ns->node);
2652 mutex_lock(&root->chain_lock);
2653 MLX5_SET(set_flow_table_root_in, in, table_id,
2654 root->root_ft->id);
2655 }
2656
2657 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2658 mutex_unlock(&root->chain_lock);
2659
2660 return err;
2661 }
2662
2663 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2664 struct mlx5_core_dev *slave,
2665 struct mlx5_vport *vport,
2666 struct mlx5_flow_table *acl)
2667 {
2668 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2669 struct mlx5_flow_handle *flow_rule = NULL;
2670 struct mlx5_flow_destination dest = {};
2671 struct mlx5_flow_act flow_act = {};
2672 struct mlx5_flow_spec *spec;
2673 int err = 0;
2674 void *misc;
2675
2676 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2677 if (!spec)
2678 return -ENOMEM;
2679
2680 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2681 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2682 misc_parameters);
2683 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2684 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);
2685
2686 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2687 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2688 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2689 source_eswitch_owner_vhca_id);
2690
2691 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2692 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2693 dest.vport.num = slave->priv.eswitch->manager_vport;
2694 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2695 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2696
2697 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
2698 &dest, 1);
2699 if (IS_ERR(flow_rule)) {
2700 err = PTR_ERR(flow_rule);
2701 } else {
2702 err = xa_insert(&vport->egress.offloads.bounce_rules,
2703 slave_index, flow_rule, GFP_KERNEL);
2704 if (err)
2705 mlx5_del_flow_rules(flow_rule);
2706 }
2707
2708 kvfree(spec);
2709 return err;
2710 }
2711
2712 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
2713 struct mlx5_flow_namespace *egress_ns,
2714 struct mlx5_vport *vport, size_t count)
2715 {
2716 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2717 struct mlx5_flow_table_attr ft_attr = {
2718 .max_fte = count, .prio = 0, .level = 0,
2719 };
2720 struct mlx5_flow_table *acl;
2721 struct mlx5_flow_group *g;
2722 void *match_criteria;
2723 u32 *flow_group_in;
2724 int err;
2725
2726 if (vport->egress.acl)
2727 return 0;
2728
2729 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2730 if (!flow_group_in)
2731 return -ENOMEM;
2732
2733 if (vport->vport || mlx5_core_is_ecpf(esw->dev))
2734 ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
2735
2736 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2737 if (IS_ERR(acl)) {
2738 err = PTR_ERR(acl);
2739 goto out;
2740 }
2741
2742 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2743 match_criteria);
2744 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2745 misc_parameters.source_port);
2746 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2747 misc_parameters.source_eswitch_owner_vhca_id);
2748 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2749 MLX5_MATCH_MISC_PARAMETERS);
2750
2751 MLX5_SET(create_flow_group_in, flow_group_in,
2752 source_eswitch_owner_vhca_id_valid, 1);
2753 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2754 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);
2755
2756 g = mlx5_create_flow_group(acl, flow_group_in);
2757 if (IS_ERR(g)) {
2758 err = PTR_ERR(g);
2759 goto err_group;
2760 }
2761
2762 vport->egress.acl = acl;
2763 vport->egress.offloads.bounce_grp = g;
2764 vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
2765 xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);
2766
2767 kvfree(flow_group_in);
2768
2769 return 0;
2770
2771 err_group:
2772 mlx5_destroy_flow_table(acl);
2773 out:
2774 kvfree(flow_group_in);
2775 return err;
2776 }
2777
2778 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
2779 {
2780 if (!xa_empty(&vport->egress.offloads.bounce_rules))
2781 return;
2782 mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
2783 vport->egress.offloads.bounce_grp = NULL;
2784 mlx5_destroy_flow_table(vport->egress.acl);
2785 vport->egress.acl = NULL;
2786 }
2787
2788 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2789 struct mlx5_core_dev *slave, size_t count)
2790 {
2791 struct mlx5_eswitch *esw = master->priv.eswitch;
2792 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
2793 struct mlx5_flow_namespace *egress_ns;
2794 struct mlx5_vport *vport;
2795 int err;
2796
2797 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2798 if (IS_ERR(vport))
2799 return PTR_ERR(vport);
2800
2801 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2802 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2803 vport->index);
2804 if (!egress_ns)
2805 return -EINVAL;
2806
2807 if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
2808 return 0;
2809
2810 err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
2811 if (err)
2812 return err;
2813
2814 if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
2815 return -EINVAL;
2816
2817 err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
2818 if (err)
2819 goto err_rule;
2820
2821 return 0;
2822
2823 err_rule:
2824 esw_master_egress_destroy_resources(vport);
2825 return err;
2826 }
2827
2828 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
2829 struct mlx5_core_dev *slave_dev)
2830 {
2831 struct mlx5_vport *vport;
2832
2833 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2834 dev->priv.eswitch->manager_vport);
2835
2836 esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));
2837
2838 if (xa_empty(&vport->egress.offloads.bounce_rules)) {
2839 esw_acl_egress_ofld_cleanup(vport);
2840 xa_destroy(&vport->egress.offloads.bounce_rules);
2841 }
2842 }
2843
2844 int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
2845 struct mlx5_eswitch *slave_esw, int max_slaves)
2846 {
2847 int err;
2848
2849 err = esw_set_slave_root_fdb(master_esw->dev,
2850 slave_esw->dev);
2851 if (err)
2852 return err;
2853
2854 err = esw_set_master_egress_rule(master_esw->dev,
2855 slave_esw->dev, max_slaves);
2856 if (err)
2857 goto err_acl;
2858
2859 return err;
2860
2861 err_acl:
2862 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2863 return err;
2864 }
2865
2866 void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
2867 struct mlx5_eswitch *slave_esw)
2868 {
2869 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2870 esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
2871 }
2872
2873 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
2874 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2875
2876 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
2877 struct mlx5_eswitch *peer_esw)
2878 {
2879 const struct mlx5_eswitch_rep_ops *ops;
2880 struct mlx5_eswitch_rep *rep;
2881 unsigned long i;
2882 u8 rep_type;
2883
2884 mlx5_esw_for_each_rep(esw, i, rep) {
2885 rep_type = NUM_REP_TYPES;
2886 while (rep_type--) {
2887 ops = esw->offloads.rep_ops[rep_type];
2888 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2889 ops->event)
2890 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
2891 }
2892 }
2893 }
2894
2895 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
2896 struct mlx5_eswitch *peer_esw)
2897 {
2898 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2899 mlx5e_tc_clean_fdb_peer_flows(esw);
2900 #endif
2901 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
2902 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
2903 }
2904
2905 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2906 struct mlx5_eswitch *peer_esw)
2907 {
2908 const struct mlx5_eswitch_rep_ops *ops;
2909 struct mlx5_eswitch_rep *rep;
2910 unsigned long i;
2911 u8 rep_type;
2912 int err;
2913
2914 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2915 if (err)
2916 return err;
2917
2918 mlx5_esw_for_each_rep(esw, i, rep) {
2919 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2920 ops = esw->offloads.rep_ops[rep_type];
2921 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2922 ops->event) {
2923 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2924 if (err)
2925 goto err_out;
2926 }
2927 }
2928 }
2929
2930 return 0;
2931
2932 err_out:
2933 mlx5_esw_offloads_unpair(esw, peer_esw);
2934 return err;
2935 }
2936
2937 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2938 struct mlx5_eswitch *peer_esw,
2939 bool pair)
2940 {
2941 u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
2942 u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
2943 struct mlx5_flow_root_namespace *peer_ns;
2944 struct mlx5_flow_root_namespace *ns;
2945 int err;
2946
2947 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2948 ns = esw->dev->priv.steering->fdb_root_ns;
2949
2950 if (pair) {
2951 err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
2952 if (err)
2953 return err;
2954
2955 err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
2956 if (err) {
2957 mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
2958 return err;
2959 }
2960 } else {
2961 mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
2962 mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
2963 }
2964
2965 return 0;
2966 }
2967
2968 static int mlx5_esw_offloads_devcom_event(int event,
2969 void *my_data,
2970 void *event_data)
2971 {
2972 struct mlx5_eswitch *esw = my_data;
2973 struct mlx5_eswitch *peer_esw = event_data;
2974 u16 esw_i, peer_esw_i;
2975 bool esw_paired;
2976 int err;
2977
2978 peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
2979 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
2980 esw_paired = !!xa_load(&esw->paired, peer_esw_i);
2981
2982 switch (event) {
2983 case ESW_OFFLOADS_DEVCOM_PAIR:
2984 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2985 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2986 break;
2987
2988 if (esw_paired)
2989 break;
2990
2991 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2992 if (err)
2993 goto err_out;
2994
2995 err = mlx5_esw_offloads_pair(esw, peer_esw);
2996 if (err)
2997 goto err_peer;
2998
2999 err = mlx5_esw_offloads_pair(peer_esw, esw);
3000 if (err)
3001 goto err_pair;
3002
3003 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
3004 if (err)
3005 goto err_xa;
3006
3007 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
3008 if (err)
3009 goto err_peer_xa;
3010
3011 esw->num_peers++;
3012 peer_esw->num_peers++;
3013 mlx5_devcom_comp_set_ready(esw->devcom, true);
3014 break;
3015
3016 case ESW_OFFLOADS_DEVCOM_UNPAIR:
3017 if (!esw_paired)
3018 break;
3019
3020 peer_esw->num_peers--;
3021 esw->num_peers--;
3022 if (!esw->num_peers && !peer_esw->num_peers)
3023 mlx5_devcom_comp_set_ready(esw->devcom, false);
3024 xa_erase(&peer_esw->paired, esw_i);
3025 xa_erase(&esw->paired, peer_esw_i);
3026 mlx5_esw_offloads_unpair(peer_esw, esw);
3027 mlx5_esw_offloads_unpair(esw, peer_esw);
3028 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
3029 break;
3030 }
3031
3032 return 0;
3033
3034 err_peer_xa:
3035 xa_erase(&esw->paired, peer_esw_i);
3036 err_xa:
3037 mlx5_esw_offloads_unpair(peer_esw, esw);
3038 err_pair:
3039 mlx5_esw_offloads_unpair(esw, peer_esw);
3040 err_peer:
3041 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
3042 err_out:
3043 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
3044 event, err);
3045 return err;
3046 }
3047
3048 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
3049 {
3050 int i;
3051
3052 for (i = 0; i < MLX5_MAX_PORTS; i++)
3053 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
3054 mutex_init(&esw->offloads.peer_mutex);
3055
3056 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
3057 return;
3058
3059 if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
3060 !mlx5_lag_is_supported(esw->dev))
3061 return;
3062
3063 xa_init(&esw->paired);
3064 esw->num_peers = 0;
3065 esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
3066 MLX5_DEVCOM_ESW_OFFLOADS,
3067 key,
3068 mlx5_esw_offloads_devcom_event,
3069 esw);
3070 if (IS_ERR_OR_NULL(esw->devcom))
3071 return;
3072
3073 mlx5_devcom_send_event(esw->devcom,
3074 ESW_OFFLOADS_DEVCOM_PAIR,
3075 ESW_OFFLOADS_DEVCOM_UNPAIR,
3076 esw);
3077 }
3078
3079 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
3080 {
3081 if (IS_ERR_OR_NULL(esw->devcom))
3082 return;
3083
3084 mlx5_devcom_send_event(esw->devcom,
3085 ESW_OFFLOADS_DEVCOM_UNPAIR,
3086 ESW_OFFLOADS_DEVCOM_UNPAIR,
3087 esw);
3088
3089 mlx5_devcom_unregister_component(esw->devcom);
3090 xa_destroy(&esw->paired);
3091 esw->devcom = NULL;
3092 }
3093
3094 bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
3095 {
3096 return mlx5_devcom_comp_is_ready(esw->devcom);
3097 }
3098
3099 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
3100 {
3101 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
3102 return false;
3103
3104 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
3105 MLX5_FDB_TO_VPORT_REG_C_0))
3106 return false;
3107
3108 return true;
3109 }
3110
3111 #define MLX5_ESW_METADATA_RSVD_UPLINK 1
3112
3113 /* Share the same metadata for uplinks. This is fine because:
3114 * (a) In shared FDB mode (LAG) both uplinks are treated the
3115 * same and tagged with the same metadata.
3116 * (b) In non-shared FDB mode, packets from physical port0
3117 * cannot hit eswitch of PF1 and vice versa.
3118 */
3119 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
3120 {
3121 return MLX5_ESW_METADATA_RSVD_UPLINK;
3122 }
3123
3124 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
3125 {
3126 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
3127 /* Reserve 0xf for internal port offload */
3128 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
3129 u32 pf_num;
3130 int id;
3131
3132 /* Only 4 bits of pf_num */
3133 pf_num = mlx5_get_dev_index(esw->dev);
3134 if (pf_num > max_pf_num)
3135 return 0;
3136
3137 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
3138 /* Use only non-zero vport_id (2-4095) for all PFs */
3139 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
3140 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
3141 vport_end_ida, GFP_KERNEL);
3142 if (id < 0)
3143 return 0;
3144 id = (pf_num << ESW_VPORT_BITS) | id;
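	/* Illustration of the composition above (bit widths assumed): with
	 * ESW_PFNUM_BITS = 4 and ESW_VPORT_BITS = 12, pf_num = 1 and an
	 * allocated ida id of 5 yield metadata (1 << 12) | 5 = 0x1005.
	 */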
3145 return id;
3146 }
3147
3148 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
3149 {
3150 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
3151
3152 /* Metadata contains only 12 bits of actual ida id */
3153 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
3154 }
3155
3156 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
3157 struct mlx5_vport *vport)
3158 {
3159 if (vport->vport == MLX5_VPORT_UPLINK)
3160 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
3161 else
3162 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
3163
3164 vport->metadata = vport->default_metadata;
3165 return vport->metadata ? 0 : -ENOSPC;
3166 }
3167
3168 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3169 struct mlx5_vport *vport)
3170 {
3171 if (!vport->default_metadata)
3172 return;
3173
3174 if (vport->vport == MLX5_VPORT_UPLINK)
3175 return;
3176
3177 WARN_ON(vport->metadata != vport->default_metadata);
3178 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3179 }
3180
3181 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
3182 {
3183 struct mlx5_vport *vport;
3184 unsigned long i;
3185
3186 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3187 return;
3188
3189 mlx5_esw_for_each_vport(esw, i, vport)
3190 esw_offloads_vport_metadata_cleanup(esw, vport);
3191 }
3192
3193 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
3194 {
3195 struct mlx5_vport *vport;
3196 unsigned long i;
3197 int err;
3198
3199 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3200 return 0;
3201
3202 mlx5_esw_for_each_vport(esw, i, vport) {
3203 err = esw_offloads_vport_metadata_setup(esw, vport);
3204 if (err)
3205 goto metadata_err;
3206 }
3207
3208 return 0;
3209
3210 metadata_err:
3211 esw_offloads_metadata_uninit(esw);
3212 return err;
3213 }
3214
3215 int
3216 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
3217 struct mlx5_vport *vport)
3218 {
3219 int err;
3220
3221 err = esw_acl_ingress_ofld_setup(esw, vport);
3222 if (err)
3223 return err;
3224
3225 err = esw_acl_egress_ofld_setup(esw, vport);
3226 if (err)
3227 goto egress_err;
3228
3229 return 0;
3230
3231 egress_err:
3232 esw_acl_ingress_ofld_cleanup(esw, vport);
3233 return err;
3234 }
3235
3236 void
3237 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
3238 struct mlx5_vport *vport)
3239 {
3240 esw_acl_egress_ofld_cleanup(vport);
3241 esw_acl_ingress_ofld_cleanup(esw, vport);
3242 }
3243
3244 static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
3245 {
3246 struct mlx5_vport *uplink, *manager;
3247 int ret;
3248
3249 uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3250 if (IS_ERR(uplink))
3251 return PTR_ERR(uplink);
3252
3253 ret = esw_vport_create_offloads_acl_tables(esw, uplink);
3254 if (ret)
3255 return ret;
3256
3257 manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3258 if (IS_ERR(manager)) {
3259 ret = PTR_ERR(manager);
3260 goto err_manager;
3261 }
3262
3263 ret = esw_vport_create_offloads_acl_tables(esw, manager);
3264 if (ret)
3265 goto err_manager;
3266
3267 return 0;
3268
3269 err_manager:
3270 esw_vport_destroy_offloads_acl_tables(esw, uplink);
3271 return ret;
3272 }
3273
3274 static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
3275 {
3276 struct mlx5_vport *vport;
3277
3278 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3279 if (!IS_ERR(vport))
3280 esw_vport_destroy_offloads_acl_tables(esw, vport);
3281
3282 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3283 if (!IS_ERR(vport))
3284 esw_vport_destroy_offloads_acl_tables(esw, vport);
3285 }
3286
3287 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
3288 {
3289 struct mlx5_eswitch_rep *rep;
3290 unsigned long i;
3291 int ret;
3292
3293 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3294 return 0;
3295
3296 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3297 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3298 return 0;
3299
3300 ret = __esw_offloads_load_rep(esw, rep, REP_IB);
3301 if (ret)
3302 return ret;
3303
3304 mlx5_esw_for_each_rep(esw, i, rep) {
3305 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3306 __esw_offloads_load_rep(esw, rep, REP_IB);
3307 }
3308
3309 return 0;
3310 }
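/* The uplink IB rep is (re)loaded explicitly before the loop so that the
 * "uplink rep loads first" ordering used in esw_offloads_enable() also holds
 * when the IB reps are reloaded on their own.
 */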
3311
3312 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3313 {
3314 struct mlx5_esw_indir_table *indir;
3315 int err;
3316
3317 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3318 mutex_init(&esw->fdb_table.offloads.vports.lock);
3319 hash_init(esw->fdb_table.offloads.vports.table);
3320 atomic64_set(&esw->user_count, 0);
3321
3322 indir = mlx5_esw_indir_table_init();
3323 if (IS_ERR(indir)) {
3324 err = PTR_ERR(indir);
3325 goto create_indir_err;
3326 }
3327 esw->fdb_table.offloads.indir = indir;
3328
3329 err = esw_create_offloads_acl_tables(esw);
3330 if (err)
3331 goto create_acl_err;
3332
3333 err = esw_create_offloads_table(esw);
3334 if (err)
3335 goto create_offloads_err;
3336
3337 err = esw_create_restore_table(esw);
3338 if (err)
3339 goto create_restore_err;
3340
3341 err = esw_create_offloads_fdb_tables(esw);
3342 if (err)
3343 goto create_fdb_err;
3344
3345 err = esw_create_vport_rx_group(esw);
3346 if (err)
3347 goto create_fg_err;
3348
3349 err = esw_create_vport_rx_drop_group(esw);
3350 if (err)
3351 goto create_rx_drop_fg_err;
3352
3353 err = esw_create_vport_rx_drop_rule(esw);
3354 if (err)
3355 goto create_rx_drop_rule_err;
3356
3357 return 0;
3358
3359 create_rx_drop_rule_err:
3360 esw_destroy_vport_rx_drop_group(esw);
3361 create_rx_drop_fg_err:
3362 esw_destroy_vport_rx_group(esw);
3363 create_fg_err:
3364 esw_destroy_offloads_fdb_tables(esw);
3365 create_fdb_err:
3366 esw_destroy_restore_table(esw);
3367 create_restore_err:
3368 esw_destroy_offloads_table(esw);
3369 create_offloads_err:
3370 esw_destroy_offloads_acl_tables(esw);
3371 create_acl_err:
3372 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3373 create_indir_err:
3374 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3375 return err;
3376 }
3377
3378 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3379 {
3380 esw_destroy_vport_rx_drop_rule(esw);
3381 esw_destroy_vport_rx_drop_group(esw);
3382 esw_destroy_vport_rx_group(esw);
3383 esw_destroy_offloads_fdb_tables(esw);
3384 esw_destroy_restore_table(esw);
3385 esw_destroy_offloads_table(esw);
3386 esw_destroy_offloads_acl_tables(esw);
3387 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3388 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3389 }
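/* Teardown mirrors esw_offloads_steering_init() in exact reverse order,
 * matching the error-unwind labels above.
 */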
3390
3391 static void
3392 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3393 {
3394 struct devlink *devlink;
3395 bool host_pf_disabled;
3396 u16 new_num_vfs;
3397
3398 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3399 host_params_context.host_num_of_vfs);
3400 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3401 host_params_context.host_pf_disabled);
3402
3403 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3404 return;
3405
3406 devlink = priv_to_devlink(esw->dev);
3407 devl_lock(devlink);
3408 /* Number of VFs can only change from "0 to x" or "x to 0". */
3409 if (esw->esw_funcs.num_vfs > 0) {
3410 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3411 } else {
3412 int err;
3413
3414 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3415 MLX5_VPORT_UC_ADDR_CHANGE);
3416 if (err) {
3417 devl_unlock(devlink);
3418 return;
3419 }
3420 }
3421 esw->esw_funcs.num_vfs = new_num_vfs;
3422 devl_unlock(devlink);
3423 }
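/* Example of the "0 to x" / "x to 0" rule: when the external host enables
 * 4 VFs, new_num_vfs == 4 while esw->esw_funcs.num_vfs == 0 and the VF
 * vports get loaded; when the host later disables SR-IOV, new_num_vfs == 0
 * and the previously stored count is used to unload them again.
 */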
3424
3425 static void esw_functions_changed_event_handler(struct work_struct *work)
3426 {
3427 struct mlx5_host_work *host_work;
3428 struct mlx5_eswitch *esw;
3429 const u32 *out;
3430
3431 host_work = container_of(work, struct mlx5_host_work, work);
3432 esw = host_work->esw;
3433
3434 out = mlx5_esw_query_functions(esw->dev);
3435 if (IS_ERR(out))
3436 goto out;
3437
3438 esw_vfs_changed_event_handler(esw, out);
3439 kvfree(out);
3440 out:
3441 kfree(host_work);
3442 }
3443
3444 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3445 {
3446 struct mlx5_esw_functions *esw_funcs;
3447 struct mlx5_host_work *host_work;
3448 struct mlx5_eswitch *esw;
3449
3450 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3451 if (!host_work)
3452 return NOTIFY_DONE;
3453
3454 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3455 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3456
3457 host_work->esw = esw;
3458
3459 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3460 queue_work(esw->work_queue, &host_work->work);
3461
3462 return NOTIFY_OK;
3463 }
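/* The GFP_ATOMIC allocation and the hand-off to esw->work_queue suggest this
 * notifier can be invoked from a context that must not sleep; the actual
 * (sleeping) firmware query happens later in
 * esw_functions_changed_event_handler().
 */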
3464
3465 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3466 {
3467 const u32 *query_host_out;
3468
3469 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3470 return 0;
3471
3472 query_host_out = mlx5_esw_query_functions(esw->dev);
3473 if (IS_ERR(query_host_out))
3474 return PTR_ERR(query_host_out);
3475
3476 /* Mark a non-local controller with a non-zero controller number. */
3477 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3478 host_params_context.host_number);
3479 kvfree(query_host_out);
3480 return 0;
3481 }
3482
3483 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3484 {
3485 /* Local controller is always valid */
3486 if (controller == 0)
3487 return true;
3488
3489 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3490 return false;
3491
3492 /* External host numbering starts at zero in the device */
3493 return (controller == esw->offloads.host_number + 1);
3494 }
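/* Worked example: on an ECPF eswitch manager whose external host reports
 * host_number == 0, controller 0 (local) and controller 1 (host_number + 1)
 * are accepted and every other value is rejected.
 */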
3495
3496 int esw_offloads_enable(struct mlx5_eswitch *esw)
3497 {
3498 struct mapping_ctx *reg_c0_obj_pool;
3499 struct mlx5_vport *vport;
3500 unsigned long i;
3501 u64 mapping_id;
3502 int err;
3503
3504 mutex_init(&esw->offloads.termtbl_mutex);
3505 mlx5_rdma_enable_roce(esw->dev);
3506
3507 err = mlx5_esw_host_number_init(esw);
3508 if (err)
3509 goto err_metadata;
3510
3511 err = esw_offloads_metadata_init(esw);
3512 if (err)
3513 goto err_metadata;
3514
3515 err = esw_set_passing_vport_metadata(esw, true);
3516 if (err)
3517 goto err_vport_metadata;
3518
3519 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3520
3521 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3522 sizeof(struct mlx5_mapped_obj),
3523 ESW_REG_C0_USER_DATA_METADATA_MASK,
3524 true);
3525
3526 if (IS_ERR(reg_c0_obj_pool)) {
3527 err = PTR_ERR(reg_c0_obj_pool);
3528 goto err_pool;
3529 }
3530 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3531
3532 err = esw_offloads_steering_init(esw);
3533 if (err)
3534 goto err_steering_init;
3535
3536 /* Representor will control the vport link state */
3537 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3538 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3539 if (mlx5_core_ec_sriov_enabled(esw->dev))
3540 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
3541 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3542
3543 /* Uplink vport rep must load first. */
3544 err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3545 if (err)
3546 goto err_uplink;
3547
3548 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3549 if (err)
3550 goto err_vports;
3551
3552 return 0;
3553
3554 err_vports:
3555 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3556 err_uplink:
3557 esw_offloads_steering_cleanup(esw);
3558 err_steering_init:
3559 mapping_destroy(reg_c0_obj_pool);
3560 err_pool:
3561 esw_set_passing_vport_metadata(esw, false);
3562 err_vport_metadata:
3563 esw_offloads_metadata_uninit(esw);
3564 err_metadata:
3565 mlx5_rdma_disable_roce(esw->dev);
3566 mutex_destroy(&esw->offloads.termtbl_mutex);
3567 return err;
3568 }
3569
3570 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3571 struct netlink_ext_ack *extack)
3572 {
3573 int err;
3574
3575 esw->mode = MLX5_ESWITCH_LEGACY;
3576
3577 /* If changing from switchdev to legacy mode without SR-IOV enabled,
3578 * there is no need to create the legacy FDB.
3579 */
3580 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
3581 return 0;
3582
3583 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3584 if (err)
3585 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3586
3587 return err;
3588 }
3589
3590 void esw_offloads_disable(struct mlx5_eswitch *esw)
3591 {
3592 mlx5_eswitch_disable_pf_vf_vports(esw);
3593 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3594 esw_set_passing_vport_metadata(esw, false);
3595 esw_offloads_steering_cleanup(esw);
3596 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3597 esw_offloads_metadata_uninit(esw);
3598 mlx5_rdma_disable_roce(esw->dev);
3599 mutex_destroy(&esw->offloads.termtbl_mutex);
3600 }
3601
3602 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3603 {
3604 switch (mode) {
3605 case DEVLINK_ESWITCH_MODE_LEGACY:
3606 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3607 break;
3608 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3609 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3610 break;
3611 default:
3612 return -EINVAL;
3613 }
3614
3615 return 0;
3616 }
3617
3618 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3619 {
3620 switch (mlx5_mode) {
3621 case MLX5_ESWITCH_LEGACY:
3622 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3623 break;
3624 case MLX5_ESWITCH_OFFLOADS:
3625 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3626 break;
3627 default:
3628 return -EINVAL;
3629 }
3630
3631 return 0;
3632 }
3633
3634 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3635 {
3636 switch (mode) {
3637 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3638 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3639 break;
3640 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3641 *mlx5_mode = MLX5_INLINE_MODE_L2;
3642 break;
3643 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3644 *mlx5_mode = MLX5_INLINE_MODE_IP;
3645 break;
3646 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3647 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3648 break;
3649 default:
3650 return -EINVAL;
3651 }
3652
3653 return 0;
3654 }
3655
3656 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3657 {
3658 switch (mlx5_mode) {
3659 case MLX5_INLINE_MODE_NONE:
3660 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3661 break;
3662 case MLX5_INLINE_MODE_L2:
3663 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3664 break;
3665 case MLX5_INLINE_MODE_IP:
3666 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3667 break;
3668 case MLX5_INLINE_MODE_TCP_UDP:
3669 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3670 break;
3671 default:
3672 return -EINVAL;
3673 }
3674
3675 return 0;
3676 }
3677
3678 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
3679 {
3680 struct mlx5_eswitch *esw = dev->priv.eswitch;
3681 int err;
3682
3683 if (!mlx5_esw_allowed(esw))
3684 return 0;
3685
3686 /* Take TC into account */
3687 err = mlx5_esw_try_lock(esw);
3688 if (err < 0)
3689 return err;
3690
3691 esw->offloads.num_block_mode++;
3692 mlx5_esw_unlock(esw);
3693 return 0;
3694 }
3695
3696 void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
3697 {
3698 struct mlx5_eswitch *esw = dev->priv.eswitch;
3699
3700 if (!mlx5_esw_allowed(esw))
3701 return;
3702
3703 down_write(&esw->mode_lock);
3704 esw->offloads.num_block_mode--;
3705 up_write(&esw->mode_lock);
3706 }
3707
3708 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3709 struct netlink_ext_ack *extack)
3710 {
3711 u16 cur_mlx5_mode, mlx5_mode = 0;
3712 struct mlx5_eswitch *esw;
3713 int err = 0;
3714
3715 esw = mlx5_devlink_eswitch_get(devlink);
3716 if (IS_ERR(esw))
3717 return PTR_ERR(esw);
3718
3719 if (esw_mode_from_devlink(mode, &mlx5_mode))
3720 return -EINVAL;
3721
3722 mlx5_lag_disable_change(esw->dev);
3723 err = mlx5_esw_try_lock(esw);
3724 if (err < 0) {
3725 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
3726 goto enable_lag;
3727 }
3728 cur_mlx5_mode = err;
3729 err = 0;
3730
3731 if (cur_mlx5_mode == mlx5_mode)
3732 goto unlock;
3733
3734 if (esw->offloads.num_block_mode) {
3735 NL_SET_ERR_MSG_MOD(extack,
3736 "Can't change eswitch mode when IPsec SA and/or policies are configured");
3737 err = -EOPNOTSUPP;
3738 goto unlock;
3739 }
3740
3741 esw->eswitch_operation_in_progress = true;
3742 up_write(&esw->mode_lock);
3743
3744 mlx5_eswitch_disable_locked(esw);
3745 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3746 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3747 NL_SET_ERR_MSG_MOD(extack,
3748 "Can't change mode while devlink traps are active");
3749 err = -EOPNOTSUPP;
3750 goto skip;
3751 }
3752 err = esw_offloads_start(esw, extack);
3753 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
3754 err = esw_offloads_stop(esw, extack);
3755 mlx5_rescan_drivers(esw->dev);
3756 } else {
3757 err = -EINVAL;
3758 }
3759
3760 skip:
3761 down_write(&esw->mode_lock);
3762 esw->eswitch_operation_in_progress = false;
3763 unlock:
3764 mlx5_esw_unlock(esw);
3765 enable_lag:
3766 mlx5_lag_enable_change(esw->dev);
3767 return err;
3768 }
3769
3770 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3771 {
3772 struct mlx5_eswitch *esw;
3773
3774 esw = mlx5_devlink_eswitch_get(devlink);
3775 if (IS_ERR(esw))
3776 return PTR_ERR(esw);
3777
3778 return esw_mode_to_devlink(esw->mode, mode);
3779 }
3780
3781 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3782 struct netlink_ext_ack *extack)
3783 {
3784 struct mlx5_core_dev *dev = esw->dev;
3785 struct mlx5_vport *vport;
3786 u16 err_vport_num = 0;
3787 unsigned long i;
3788 int err = 0;
3789
3790 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3791 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3792 if (err) {
3793 err_vport_num = vport->vport;
3794 NL_SET_ERR_MSG_MOD(extack,
3795 "Failed to set min inline on vport");
3796 goto revert_inline_mode;
3797 }
3798 }
3799 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
3800 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3801 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3802 if (err) {
3803 err_vport_num = vport->vport;
3804 NL_SET_ERR_MSG_MOD(extack,
3805 "Failed to set min inline on vport");
3806 goto revert_ec_vf_inline_mode;
3807 }
3808 }
3809 }
3810 return 0;
3811
3812 revert_ec_vf_inline_mode:
3813 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3814 if (vport->vport == err_vport_num)
3815 break;
3816 mlx5_modify_nic_vport_min_inline(dev,
3817 vport->vport,
3818 esw->offloads.inline_mode);
3819 }
3820 revert_inline_mode:
3821 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3822 if (vport->vport == err_vport_num)
3823 break;
3824 mlx5_modify_nic_vport_min_inline(dev,
3825 vport->vport,
3826 esw->offloads.inline_mode);
3827 }
3828 return err;
3829 }
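/* On failure the revert loops above walk the same vport iterators and put
 * esw->offloads.inline_mode back, stopping at err_vport_num so the vport
 * that failed (and the ones after it, which were never modified) are left
 * untouched.
 */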
3830
3831 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3832 struct netlink_ext_ack *extack)
3833 {
3834 struct mlx5_core_dev *dev = devlink_priv(devlink);
3835 struct mlx5_eswitch *esw;
3836 u8 mlx5_mode;
3837 int err;
3838
3839 esw = mlx5_devlink_eswitch_get(devlink);
3840 if (IS_ERR(esw))
3841 return PTR_ERR(esw);
3842
3843 down_write(&esw->mode_lock);
3844
3845 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3846 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3847 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3848 err = 0;
3849 goto out;
3850 }
3851
3852 fallthrough;
3853 case MLX5_CAP_INLINE_MODE_L2:
3854 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
3855 err = -EOPNOTSUPP;
3856 goto out;
3857 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3858 break;
3859 }
3860
3861 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3862 NL_SET_ERR_MSG_MOD(extack,
3863 "Can't set inline mode when flows are configured");
3864 err = -EOPNOTSUPP;
3865 goto out;
3866 }
3867
3868 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3869 if (err)
3870 goto out;
3871
3872 esw->eswitch_operation_in_progress = true;
3873 up_write(&esw->mode_lock);
3874
3875 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3876 if (!err)
3877 esw->offloads.inline_mode = mlx5_mode;
3878
3879 down_write(&esw->mode_lock);
3880 esw->eswitch_operation_in_progress = false;
3881 up_write(&esw->mode_lock);
3882 return err;
3883
3884 out:
3885 up_write(&esw->mode_lock);
3886 return err;
3887 }
3888
3889 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3890 {
3891 struct mlx5_eswitch *esw;
3892
3893 esw = mlx5_devlink_eswitch_get(devlink);
3894 if (IS_ERR(esw))
3895 return PTR_ERR(esw);
3896
3897 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3898 }
3899
3900 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
3901 {
3902 struct mlx5_eswitch *esw = dev->priv.eswitch;
3903
3904 if (!mlx5_esw_allowed(esw))
3905 return true;
3906
3907 down_write(&esw->mode_lock);
3908 if (esw->mode != MLX5_ESWITCH_LEGACY &&
3909 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
3910 up_write(&esw->mode_lock);
3911 return false;
3912 }
3913
3914 esw->offloads.num_block_encap++;
3915 up_write(&esw->mode_lock);
3916 return true;
3917 }
3918
3919 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
3920 {
3921 struct mlx5_eswitch *esw = dev->priv.eswitch;
3922
3923 if (!mlx5_esw_allowed(esw))
3924 return;
3925
3926 down_write(&esw->mode_lock);
3927 esw->offloads.num_block_encap--;
3928 up_write(&esw->mode_lock);
3929 }
3930
3931 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3932 enum devlink_eswitch_encap_mode encap,
3933 struct netlink_ext_ack *extack)
3934 {
3935 struct mlx5_core_dev *dev = devlink_priv(devlink);
3936 struct mlx5_eswitch *esw;
3937 int err = 0;
3938
3939 esw = mlx5_devlink_eswitch_get(devlink);
3940 if (IS_ERR(esw))
3941 return PTR_ERR(esw);
3942
3943 down_write(&esw->mode_lock);
3944
3945 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
3946 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
3947 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3948 err = -EOPNOTSUPP;
3949 goto unlock;
3950 }
3951
3952 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3953 err = -EOPNOTSUPP;
3954 goto unlock;
3955 }
3956
3957 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3958 esw->offloads.encap = encap;
3959 goto unlock;
3960 }
3961
3962 if (esw->offloads.encap == encap)
3963 goto unlock;
3964
3965 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3966 NL_SET_ERR_MSG_MOD(extack,
3967 "Can't set encapsulation when flows are configured");
3968 err = -EOPNOTSUPP;
3969 goto unlock;
3970 }
3971
3972 if (esw->offloads.num_block_encap) {
3973 NL_SET_ERR_MSG_MOD(extack,
3974 "Can't set encapsulation when IPsec SA and/or policies are configured");
3975 err = -EOPNOTSUPP;
3976 goto unlock;
3977 }
3978
3979 esw->eswitch_operation_in_progress = true;
3980 up_write(&esw->mode_lock);
3981
3982 esw_destroy_offloads_fdb_tables(esw);
3983
3984 esw->offloads.encap = encap;
3985
3986 err = esw_create_offloads_fdb_tables(esw);
3987
3988 if (err) {
3989 NL_SET_ERR_MSG_MOD(extack,
3990 "Failed re-creating fast FDB table");
3991 esw->offloads.encap = !encap;
3992 (void)esw_create_offloads_fdb_tables(esw);
3993 }
3994
3995 down_write(&esw->mode_lock);
3996 esw->eswitch_operation_in_progress = false;
3997
3998 unlock:
3999 up_write(&esw->mode_lock);
4000 return err;
4001 }
4002
4003 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
4004 enum devlink_eswitch_encap_mode *encap)
4005 {
4006 struct mlx5_eswitch *esw;
4007
4008 esw = mlx5_devlink_eswitch_get(devlink);
4009 if (IS_ERR(esw))
4010 return PTR_ERR(esw);
4011
4012 *encap = esw->offloads.encap;
4013 return 0;
4014 }
4015
4016 static bool
4017 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
4018 {
4019 /* Currently, only ECPF-based devices have a representor for the host PF. */
4020 if (vport_num == MLX5_VPORT_PF &&
4021 !mlx5_core_is_ecpf_esw_manager(esw->dev))
4022 return false;
4023
4024 if (vport_num == MLX5_VPORT_ECPF &&
4025 !mlx5_ecpf_vport_exists(esw->dev))
4026 return false;
4027
4028 return true;
4029 }
4030
4031 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
4032 const struct mlx5_eswitch_rep_ops *ops,
4033 u8 rep_type)
4034 {
4035 struct mlx5_eswitch_rep_data *rep_data;
4036 struct mlx5_eswitch_rep *rep;
4037 unsigned long i;
4038
4039 esw->offloads.rep_ops[rep_type] = ops;
4040 mlx5_esw_for_each_rep(esw, i, rep) {
4041 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
4042 rep->esw = esw;
4043 rep_data = &rep->rep_data[rep_type];
4044 atomic_set(&rep_data->state, REP_REGISTERED);
4045 }
4046 }
4047 }
4048 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
4049
4050 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
4051 {
4052 struct mlx5_eswitch_rep *rep;
4053 unsigned long i;
4054
4055 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
4056 __unload_reps_all_vport(esw, rep_type);
4057
4058 mlx5_esw_for_each_rep(esw, i, rep)
4059 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
4060 }
4061 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
4062
4063 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
4064 {
4065 struct mlx5_eswitch_rep *rep;
4066
4067 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
4068 return rep->rep_data[rep_type].priv;
4069 }
4070
4071 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
4072 u16 vport,
4073 u8 rep_type)
4074 {
4075 struct mlx5_eswitch_rep *rep;
4076
4077 rep = mlx5_eswitch_get_rep(esw, vport);
4078
4079 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
4080 esw->offloads.rep_ops[rep_type]->get_proto_dev)
4081 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
4082 return NULL;
4083 }
4084 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
4085
4086 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
4087 {
4088 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
4089 }
4090 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
4091
4092 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
4093 u16 vport)
4094 {
4095 return mlx5_eswitch_get_rep(esw, vport);
4096 }
4097 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
4098
4099 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
4100 {
4101 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
4102 }
4103 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
4104
4105 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
4106 {
4107 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
4108 }
4109 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
4110
4111 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
4112 u16 vport_num)
4113 {
4114 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4115
4116 if (WARN_ON_ONCE(IS_ERR(vport)))
4117 return 0;
4118
4119 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
4120 }
4121 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
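/* A rough usage sketch (not taken from this file) for matching on the value
 * above in the misc_parameters_2 section of an FTE, assuming the standard
 * mlx5_flow_spec layout and the mlx5_eswitch_get_vport_metadata_mask()
 * helper:
 *
 *	MLX5_SET(fte_match_param, spec->match_criteria,
 *		 misc_parameters_2.metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_mask());
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 misc_parameters_2.metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 */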
4122
4123 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
4124 {
4125 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4126 void *query_ctx;
4127 void *hca_caps;
4128 int err;
4129
4130 *vhca_id = 0;
4131
4132 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4133 if (!query_ctx)
4134 return -ENOMEM;
4135
4136 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
4137 if (err)
4138 goto out_free;
4139
4140 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4141 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
4142
4143 out_free:
4144 kfree(query_ctx);
4145 return err;
4146 }
4147
4148 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
4149 {
4150 u16 *old_entry, *vhca_map_entry, vhca_id;
4151 int err;
4152
4153 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
4154 if (err) {
4155 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
4156 vport_num, err);
4157 return err;
4158 }
4159
4160 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
4161 if (!vhca_map_entry)
4162 return -ENOMEM;
4163
4164 *vhca_map_entry = vport_num;
4165 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
4166 if (xa_is_err(old_entry)) {
4167 kfree(vhca_map_entry);
4168 return xa_err(old_entry);
4169 }
4170 kfree(old_entry);
4171 return 0;
4172 }
4173
4174 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
4175 {
4176 u16 *vhca_map_entry, vhca_id;
4177 int err;
4178
4179 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
4180 if (err)
4181 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
4182 vport_num, err);
4183
4184 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
4185 kfree(vhca_map_entry);
4186 }
4187
4188 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
4189 {
4190 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
4191
4192 if (!res)
4193 return -ENOENT;
4194
4195 *vport_num = *res;
4196 return 0;
4197 }
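/* This is the reverse lookup of the vhca_map xarray filled in
 * mlx5_esw_vport_vhca_id_set(); a missing entry simply means the vhca_id was
 * never registered, hence -ENOENT.
 */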
4198
4199 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
4200 u16 vport_num)
4201 {
4202 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4203
4204 if (WARN_ON_ONCE(IS_ERR(vport)))
4205 return 0;
4206
4207 return vport->metadata;
4208 }
4209 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
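/* Unlike mlx5_eswitch_get_vport_metadata_for_match() above, this returns the
 * raw metadata value; the two differ only by the
 * (32 - ESW_SOURCE_PORT_METADATA_BITS) shift into match position.
 */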
4210
4211 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
4212 u8 *hw_addr, int *hw_addr_len,
4213 struct netlink_ext_ack *extack)
4214 {
4215 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4216 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4217
4218 mutex_lock(&esw->state_lock);
4219 ether_addr_copy(hw_addr, vport->info.mac);
4220 *hw_addr_len = ETH_ALEN;
4221 mutex_unlock(&esw->state_lock);
4222 return 0;
4223 }
4224
4225 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
4226 const u8 *hw_addr, int hw_addr_len,
4227 struct netlink_ext_ack *extack)
4228 {
4229 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4230 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4231
4232 return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
4233 }
4234
4235 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
4236 struct netlink_ext_ack *extack)
4237 {
4238 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4239 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4240
4241 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4242 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4243 return -EOPNOTSUPP;
4244 }
4245
4246 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4247 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4248 return -EOPNOTSUPP;
4249 }
4250
4251 mutex_lock(&esw->state_lock);
4252 *is_enabled = vport->info.mig_enabled;
4253 mutex_unlock(&esw->state_lock);
4254 return 0;
4255 }
4256
4257 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
4258 struct netlink_ext_ack *extack)
4259 {
4260 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4261 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4262 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4263 void *query_ctx;
4264 void *hca_caps;
4265 int err;
4266
4267 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4268 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
4269 return -EOPNOTSUPP;
4270 }
4271
4272 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4273 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4274 return -EOPNOTSUPP;
4275 }
4276
4277 mutex_lock(&esw->state_lock);
4278
4279 if (vport->info.mig_enabled == enable) {
4280 err = 0;
4281 goto out;
4282 }
4283
4284 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4285 if (!query_ctx) {
4286 err = -ENOMEM;
4287 goto out;
4288 }
4289
4290 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4291 MLX5_CAP_GENERAL_2);
4292 if (err) {
4293 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4294 goto out_free;
4295 }
4296
4297 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4298 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
4299
4300 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4301 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
4302 if (err) {
4303 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
4304 goto out_free;
4305 }
4306
4307 vport->info.mig_enabled = enable;
4308
4309 out_free:
4310 kfree(query_ctx);
4311 out:
4312 mutex_unlock(&esw->state_lock);
4313 return err;
4314 }
4315
4316 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
4317 struct netlink_ext_ack *extack)
4318 {
4319 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4320 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4321
4322 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4323 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4324 return -EOPNOTSUPP;
4325 }
4326
4327 mutex_lock(&esw->state_lock);
4328 *is_enabled = vport->info.roce_enabled;
4329 mutex_unlock(&esw->state_lock);
4330 return 0;
4331 }
4332
4333 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
4334 struct netlink_ext_ack *extack)
4335 {
4336 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4337 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
4338 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4339 u16 vport_num = vport->vport;
4340 void *query_ctx;
4341 void *hca_caps;
4342 int err;
4343
4344 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4345 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
4346 return -EOPNOTSUPP;
4347 }
4348
4349 mutex_lock(&esw->state_lock);
4350
4351 if (vport->info.roce_enabled == enable) {
4352 err = 0;
4353 goto out;
4354 }
4355
4356 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4357 if (!query_ctx) {
4358 err = -ENOMEM;
4359 goto out;
4360 }
4361
4362 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4363 MLX5_CAP_GENERAL);
4364 if (err) {
4365 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4366 goto out_free;
4367 }
4368
4369 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
4370 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
4371
4372 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4373 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
4374 if (err) {
4375 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
4376 goto out_free;
4377 }
4378
4379 vport->info.roce_enabled = enable;
4380
4381 out_free:
4382 kfree(query_ctx);
4383 out:
4384 mutex_unlock(&esw->state_lock);
4385 return err;
4386 }
4387
4388 int
4389 mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
4390 struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
4391 {
4392 struct mlx5_flow_destination new_dest = {};
4393 struct mlx5_flow_destination old_dest = {};
4394
4395 if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
4396 return 0;
4397
4398 esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4399 esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4400
4401 return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
4402 }
4403
4404 #ifdef CONFIG_XFRM_OFFLOAD
4405 int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
4406 struct netlink_ext_ack *extack)
4407 {
4408 struct mlx5_eswitch *esw;
4409 struct mlx5_vport *vport;
4410 int err = 0;
4411
4412 esw = mlx5_devlink_eswitch_get(port->devlink);
4413 if (IS_ERR(esw))
4414 return PTR_ERR(esw);
4415
4416 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4417 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
4418 return -EOPNOTSUPP;
4419 }
4420
4421 vport = mlx5_devlink_port_vport_get(port);
4422
4423 mutex_lock(&esw->state_lock);
4424 if (!vport->enabled) {
4425 err = -EOPNOTSUPP;
4426 goto unlock;
4427 }
4428
4429 *is_enabled = vport->info.ipsec_crypto_enabled;
4430 unlock:
4431 mutex_unlock(&esw->state_lock);
4432 return err;
4433 }
4434
4435 int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
4436 struct netlink_ext_ack *extack)
4437 {
4438 struct mlx5_eswitch *esw;
4439 struct mlx5_vport *vport;
4440 u16 vport_num;
4441 int err;
4442
4443 esw = mlx5_devlink_eswitch_get(port->devlink);
4444 if (IS_ERR(esw))
4445 return PTR_ERR(esw);
4446
4447 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4448 err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
4449 if (err) {
4450 NL_SET_ERR_MSG_MOD(extack,
4451 "Device doesn't support IPsec crypto");
4452 return err;
4453 }
4454
4455 vport = mlx5_devlink_port_vport_get(port);
4456
4457 mutex_lock(&esw->state_lock);
4458 if (!vport->enabled) {
4459 err = -EOPNOTSUPP;
4460 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4461 goto unlock;
4462 }
4463
4464 if (vport->info.ipsec_crypto_enabled == enable)
4465 goto unlock;
4466
4467 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4468 err = -EBUSY;
4469 goto unlock;
4470 }
4471
4472 err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
4473 if (err) {
4474 NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
4475 goto unlock;
4476 }
4477
4478 vport->info.ipsec_crypto_enabled = enable;
4479 if (enable)
4480 esw->enabled_ipsec_vf_count++;
4481 else
4482 esw->enabled_ipsec_vf_count--;
4483 unlock:
4484 mutex_unlock(&esw->state_lock);
4485 return err;
4486 }
4487
4488 int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
4489 struct netlink_ext_ack *extack)
4490 {
4491 struct mlx5_eswitch *esw;
4492 struct mlx5_vport *vport;
4493 int err = 0;
4494
4495 esw = mlx5_devlink_eswitch_get(port->devlink);
4496 if (IS_ERR(esw))
4497 return PTR_ERR(esw);
4498
4499 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4500 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
4501 return -EOPNOTSUPP;
4502 }
4503
4504 vport = mlx5_devlink_port_vport_get(port);
4505
4506 mutex_lock(&esw->state_lock);
4507 if (!vport->enabled) {
4508 err = -EOPNOTSUPP;
4509 goto unlock;
4510 }
4511
4512 *is_enabled = vport->info.ipsec_packet_enabled;
4513 unlock:
4514 mutex_unlock(&esw->state_lock);
4515 return err;
4516 }
4517
4518 int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
4519 bool enable,
4520 struct netlink_ext_ack *extack)
4521 {
4522 struct mlx5_eswitch *esw;
4523 struct mlx5_vport *vport;
4524 u16 vport_num;
4525 int err;
4526
4527 esw = mlx5_devlink_eswitch_get(port->devlink);
4528 if (IS_ERR(esw))
4529 return PTR_ERR(esw);
4530
4531 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
4532 err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
4533 if (err) {
4534 NL_SET_ERR_MSG_MOD(extack,
4535 "Device doesn't support IPsec packet mode");
4536 return err;
4537 }
4538
4539 vport = mlx5_devlink_port_vport_get(port);
4540 mutex_lock(&esw->state_lock);
4541 if (!vport->enabled) {
4542 err = -EOPNOTSUPP;
4543 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4544 goto unlock;
4545 }
4546
4547 if (vport->info.ipsec_packet_enabled == enable)
4548 goto unlock;
4549
4550 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4551 err = -EBUSY;
4552 goto unlock;
4553 }
4554
4555 err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
4556 if (err) {
4557 NL_SET_ERR_MSG_MOD(extack,
4558 "Failed to set IPsec packet mode");
4559 goto unlock;
4560 }
4561
4562 vport->info.ipsec_packet_enabled = enable;
4563 if (enable)
4564 esw->enabled_ipsec_vf_count++;
4565 else
4566 esw->enabled_ipsec_vf_count--;
4567 unlock:
4568 mutex_unlock(&esw->state_lock);
4569 return err;
4570 }
4571 #endif /* CONFIG_XFRM_OFFLOAD */
4572