// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "fs_core.h"
#include "esw/indir_table.h"
#include "lib/fs_chains.h"
#include "en/mod_hdr.h"

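/* Each indirection table holds exactly two flow entries: index 0 is the
 * recirculation rule, which rewrites the packet's source vport metadata and
 * sends it back to the TC tables, and index 1 is a catch-all forward rule
 * that steers everything else directly to the destination vport.
 */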
#define MLX5_ESW_INDIR_TABLE_SIZE 2
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)

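/* Recirculation rule of an indirection table entry. Reference counted so
 * that multiple decap flows targeting the same vport can share it.
 */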
struct mlx5_esw_indir_table_rule {
	struct mlx5_flow_handle *handle;
	struct mlx5_modify_hdr *mh;
	refcount_t refcnt;
};

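/* One indirection table instance, keyed by destination vport. fwd_ref counts
 * the non-decap users of the table, while decap users are tracked through
 * recirc_rule->refcnt; the entry is freed when both drop to zero.
 */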
struct mlx5_esw_indir_table_entry {
	struct hlist_node hlist;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *recirc_grp;
	struct mlx5_flow_group *fwd_grp;
	struct mlx5_flow_handle *fwd_rule;
	struct mlx5_esw_indir_table_rule *recirc_rule;
	int fwd_ref;

	u16 vport;
};

struct mlx5_esw_indir_table {
	struct mutex lock; /* protects table */
	DECLARE_HASHTABLE(table, 8);
};

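/* Allocate the per-eswitch indirection table container and initialize its
 * hashtable and lock. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */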
struct mlx5_esw_indir_table *
mlx5_esw_indir_table_init(void)
{
	struct mlx5_esw_indir_table *indir = kvzalloc(sizeof(*indir), GFP_KERNEL);

	if (!indir)
		return ERR_PTR(-ENOMEM);

	mutex_init(&indir->lock);
	hash_init(indir->table);
	return indir;
}

void
mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
{
	mutex_destroy(&indir->lock);
	kvfree(indir);
}

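/* Decide whether a flow must be offloaded through an indirection table
 * rather than forwarded to its destination directly.
 */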
bool
mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
			    struct mlx5_flow_attr *attr,
			    u16 vport_num,
			    struct mlx5_core_dev *dest_mdev)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool vf_sf_vport;

	vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||
		      mlx5_esw_is_sf_vport(esw, vport_num);

	/* Use indirect table for all IP traffic from UL to VF with vport
	 * destination when source rewrite flag is set.
	 */
	return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
	       vf_sf_vport &&
	       esw->dev == dest_mdev &&
	       attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}

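/* Return the vport whose metadata is restored for decapsulated traffic,
 * or 0 if the flow carries no RX tunnel attributes.
 */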
u16
mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
}

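/* Create (or take a reference on) the entry's recirculation rule. The rule
 * rewrites the packet's source vport metadata to the decap vport, marks the
 * tunnel register so the slow-path table forwards to that vport, and sends
 * the packet back to TC chain 0, prio 1 for a second pass.
 */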
static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
					 struct mlx5_flow_attr *attr,
					 struct mlx5_esw_indir_table_entry *e)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_esw_indir_table_rule *rule;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *handle;
	int err = 0;
	u32 data;

	if (e->recirc_rule) {
		refcount_inc(&e->recirc_rule->refcnt);
		return 0;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	/* Modify flow source to recirculate packet */
	data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					VPORT_TO_REG, data);
	if (err)
		goto err_mod_hdr_regc0;

	/* Mark the packet so the slow-path table forwards it to the vport */
	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					TUNNEL_TO_REG, ESW_TUN_SLOW_TABLE_GOTO_VPORT);
	if (err)
		goto err_mod_hdr_regc1;

	flow_act.modify_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
						       mod_acts.num_actions, mod_acts.actions);
	if (IS_ERR(flow_act.modify_hdr)) {
		err = PTR_ERR(flow_act.modify_hdr);
		goto err_mod_hdr_alloc;
	}

	/* Recirculate to TC chain 0, prio 1 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
	flow_act.fg = e->recirc_grp;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(dest.ft)) {
		err = PTR_ERR(dest.ft);
		goto err_table;
	}
	handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto err_handle;
	}

	mlx5e_mod_hdr_dealloc(&mod_acts);
	rule->handle = handle;
	rule->mh = flow_act.modify_hdr;
	refcount_set(&rule->refcnt, 1);
	e->recirc_rule = rule;
	return 0;

err_handle:
	mlx5_chains_put_table(chains, 0, 1, 0);
err_table:
	mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
err_mod_hdr_alloc:
err_mod_hdr_regc1:
	mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
	kfree(rule);
	return err;
}

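/* Drop a reference on the entry's recirculation rule and tear it down once
 * the last decap user is gone.
 */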
static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
					  struct mlx5_esw_indir_table_entry *e)
{
	struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (!rule)
		return;

	if (!refcount_dec_and_test(&rule->refcnt))
		return;

	mlx5_del_flow_rules(rule->handle);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_modify_header_dealloc(esw->dev, rule->mh);
	kfree(rule);
	e->recirc_rule = NULL;
}

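/* Create the flow group covering the recirculation entry (index 0). */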
static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
{
	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
	e->recirc_grp = mlx5_create_flow_group(e->ft, in);
	if (IS_ERR(e->recirc_grp))
		err = PTR_ERR(e->recirc_grp);

	kvfree(in);
	return err;
}

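/* Create the single-entry forward group (index 1) and install the catch-all
 * rule that forwards straight to the destination vport on this eswitch.
 */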
static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
				       struct mlx5_esw_indir_table_entry *e)
{
	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* Hold one entry */
	MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
	e->fwd_grp = mlx5_create_flow_group(e->ft, in);
	if (IS_ERR(e->fwd_grp)) {
		err = PTR_ERR(e->fwd_grp);
		goto err_out;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.fg = e->fwd_grp;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = e->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
	e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
	if (IS_ERR(e->fwd_rule)) {
		mlx5_destroy_flow_group(e->fwd_grp);
		err = PTR_ERR(e->fwd_rule);
	}

err_out:
	kvfree(in);
	return err;
}

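/* Allocate a new indirection table entry: create the two-entry unmanaged
 * flow table in the FDB TC-offload prio, its groups and rules, and hash the
 * entry by vport. Called with the indir table lock held.
 */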
static struct mlx5_esw_indir_table_entry *
mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				  u16 vport, bool decap)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_esw_indir_table_entry *e;
	struct mlx5_flow_table *ft;
	int err = 0;

	root_ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns)
		return ERR_PTR(-ENOENT);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);

	ft_attr.prio = FDB_TC_OFFLOAD;
	ft_attr.max_fte = MLX5_ESW_INDIR_TABLE_SIZE;
	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
	ft_attr.level = 1;

	ft = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto tbl_err;
	}
	e->ft = ft;
	e->vport = vport;
	e->fwd_ref = !decap;

	err = mlx5_create_indir_recirc_group(e);
	if (err)
		goto recirc_grp_err;

	if (decap) {
		err = mlx5_esw_indir_table_rule_get(esw, attr, e);
		if (err)
			goto recirc_rule_err;
	}

	err = mlx5_create_indir_fwd_group(esw, e);
	if (err)
		goto fwd_grp_err;

	hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
		 vport << 16);

	return e;

fwd_grp_err:
	if (decap)
		mlx5_esw_indir_table_rule_put(esw, e);
recirc_rule_err:
	mlx5_destroy_flow_group(e->recirc_grp);
recirc_grp_err:
	mlx5_destroy_flow_table(e->ft);
tbl_err:
	kfree(e);
	return ERR_PTR(err);
}

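/* Look up an entry by destination vport. Called with the indir lock held. */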
static struct mlx5_esw_indir_table_entry *
mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
{
	struct mlx5_esw_indir_table_entry *e;
	u32 key = vport << 16;

	hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
		if (e->vport == vport)
			return e;

	return NULL;
}

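/* Get (and reference) the indirection table for @vport, creating it on first
 * use. A decap user takes a reference on the recirculation rule, any other
 * user bumps fwd_ref; each successful call must be balanced by a matching
 * mlx5_esw_indir_table_put() with the same @decap value.
 */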
struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
						 struct mlx5_flow_attr *attr,
						 u16 vport, bool decap)
{
	struct mlx5_esw_indir_table_entry *e;
	int err;

	mutex_lock(&esw->fdb_table.offloads.indir->lock);
	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
	if (e) {
		if (!decap) {
			e->fwd_ref++;
		} else {
			err = mlx5_esw_indir_table_rule_get(esw, attr, e);
			if (err)
				goto out_err;
		}
	} else {
		e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
		if (IS_ERR(e)) {
			err = PTR_ERR(e);
			esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
			goto out_err;
		}
	}
	mutex_unlock(&esw->fdb_table.offloads.indir->lock);
	return e->ft;

out_err:
	mutex_unlock(&esw->fdb_table.offloads.indir->lock);
	return ERR_PTR(err);
}

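/* Release a reference taken by mlx5_esw_indir_table_get(). Once the last
 * forward and decap references are gone, destroy the entry's rules, groups
 * and flow table and remove it from the hash.
 */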
void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
			      u16 vport, bool decap)
{
	struct mlx5_esw_indir_table_entry *e;

	mutex_lock(&esw->fdb_table.offloads.indir->lock);
	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
	if (!e)
		goto out;

	if (!decap)
		e->fwd_ref--;
	else
		mlx5_esw_indir_table_rule_put(esw, e);

	if (e->fwd_ref || e->recirc_rule)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_group(e->recirc_grp);
	mlx5_del_flow_rules(e->fwd_rule);
	mlx5_destroy_flow_group(e->fwd_grp);
	mlx5_destroy_flow_table(e->ft);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.indir->lock);
}