// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

struct mlx5e_ipsec_fc {
        struct mlx5_fc *cnt;
        struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
        struct mlx5e_ipsec_ft ft;
        struct mlx5e_ipsec_miss pol;
        struct mlx5e_ipsec_miss sa;
        struct mlx5e_ipsec_rule status;
        struct mlx5_flow_namespace *ns;
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fs_chains *chains;
        u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
        if (family == AF_INET)
                return MLX5_TT_IPV4_IPSEC_ESP;
        return MLX5_TT_IPV6_IPSEC_ESP;
}

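/* Select the RX context for an SA/policy: packet offload on the uplink
 * representor is steered through the eswitch tables, everything else
 * through the per-family NIC RX tables.
 */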
static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
        if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
                return ipsec->rx_esw;

        if (family == AF_INET)
                return ipsec->rx_ipv4;

        return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
        if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
                return ipsec->tx_esw;

        return ipsec->tx;
}

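/* Wrap mlx5_chains_create() for policy priorities: each prio maps to a
 * chained table that misses to @miss_ft. Chain 0/prio 1/level 0 is
 * taken as the root table so fs_core can connect to it; it is released
 * in ipsec_chains_destroy().
 */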
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
                    enum mlx5_flow_namespace_type ns, int base_prio,
                    int base_level, struct mlx5_flow_table **root_ft)
{
        struct mlx5_chains_attr attr = {};
        struct mlx5_fs_chains *chains;
        struct mlx5_flow_table *ft;
        int err;

        attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
                     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
        attr.max_grp_num = 2;
        attr.default_ft = miss_ft;
        attr.ns = ns;
        attr.fs_base_prio = base_prio;
        attr.fs_base_level = base_level;
        chains = mlx5_chains_create(mdev, &attr);
        if (IS_ERR(chains))
                return chains;

        /* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
        ft = mlx5_chains_get_table(chains, 0, 1, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_chains_get;
        }

        *root_ft = ft;
        return chains;

err_chains_get:
        mlx5_chains_destroy(chains);
        return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
        mlx5_chains_put_table(chains, 0, 1, 0);
        mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
        return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
        mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

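/* All IPsec tables are auto-grouped and reserve their last FTE for the
 * lowest-priority catch-all rule (the miss rule added by
 * ipsec_miss_create(), or the RX status drop rule).
 */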
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
                                               int level, int prio,
                                               int max_num_groups, u32 flags)
{
        struct mlx5_flow_table_attr ft_attr = {};

        ft_attr.autogroup.num_reserved_entries = 1;
        ft_attr.autogroup.max_num_groups = max_num_groups;
        ft_attr.max_fte = NUM_IPSEC_FTE;
        ft_attr.level = level;
        ft_attr.prio = prio;
        ft_attr.flags = flags;

        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
                                         struct mlx5e_ipsec_rx *rx)
{
        mlx5_del_flow_rules(rx->status_drop.rule);
        mlx5_destroy_flow_group(rx->status_drop.group);
        mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
                                         struct mlx5e_ipsec_rx *rx)
{
        mlx5_del_flow_rules(rx->status.rule);

        if (rx != ipsec->rx_esw)
                return;

#ifdef CONFIG_MLX5_ESWITCH
        mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
#endif
}

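/* The RX status table holds two rules: a "pass" rule that counts and
 * forwards packets whose ipsec_syndrome is zero (decryption succeeded)
 * and, in the reserved last FTE, a catch-all rule that counts and drops
 * packets which failed the IPsec syndrome check.
 */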
static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx *rx)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table *ft = rx->ft.status;
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_fc *flow_counter;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        int err = 0;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!flow_group_in || !spec) {
                err = -ENOMEM;
                goto err_out;
        }

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
        g = mlx5_create_flow_group(ft, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_err(mdev,
                              "Failed to add ipsec rx status drop flow group, err=%d\n", err);
                goto err_out;
        }

        flow_counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(flow_counter)) {
                err = PTR_ERR(flow_counter);
                mlx5_core_err(mdev,
                              "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
                goto err_cnt;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest.counter_id = mlx5_fc_id(flow_counter);
        if (rx == ipsec->rx_esw)
                spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev,
                              "Failed to add ipsec rx status drop rule, err=%d\n", err);
                goto err_rule;
        }

        rx->status_drop.group = g;
        rx->status_drop.rule = rule;
        rx->status_drop_cnt = flow_counter;

        kvfree(flow_group_in);
        kvfree(spec);
        return 0;

err_rule:
        mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
        mlx5_destroy_flow_group(g);
err_out:
        kvfree(flow_group_in);
        kvfree(spec);
        return err;
}

static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx *rx,
                                       struct mlx5_flow_destination *dest)
{
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         misc_parameters_2.ipsec_syndrome);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.ipsec_syndrome, 0);
        if (rx == ipsec->rx_esw)
                spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
        flow_act.flags = FLOW_ACT_NO_APPEND;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
        rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_warn(ipsec->mdev,
                               "Failed to add ipsec rx status pass rule, err=%d\n", err);
                goto err_rule;
        }

        rx->status.rule = rule;
        kvfree(spec);
        return 0;

err_rule:
        kvfree(spec);
        return err;
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
                                         struct mlx5e_ipsec_rx *rx)
{
        ipsec_rx_status_pass_destroy(ipsec, rx);
        ipsec_rx_status_drop_destroy(ipsec, rx);
}

static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx *rx,
                                       struct mlx5_flow_destination *dest)
{
        int err;

        err = ipsec_rx_status_drop_create(ipsec, rx);
        if (err)
                return err;

        err = ipsec_rx_status_pass_create(ipsec, rx, dest);
        if (err)
                goto err_pass_create;

        return 0;

err_pass_create:
        ipsec_rx_status_drop_destroy(ipsec, rx);
        return err;
}

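/* Add a lowest-priority catch-all rule in the reserved last FTE of @ft
 * so that packets matching no other rule are forwarded to @dest.
 */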
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
                             struct mlx5_flow_table *ft,
                             struct mlx5e_ipsec_miss *miss,
                             struct mlx5_flow_destination *dest)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        u32 *flow_group_in;
        int err = 0;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!flow_group_in || !spec) {
                err = -ENOMEM;
                goto out;
        }

        /* Create miss_group */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
        miss->group = mlx5_create_flow_group(ft, flow_group_in);
        if (IS_ERR(miss->group)) {
                err = PTR_ERR(miss->group);
                mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
                              err);
                goto out;
        }

        /* Create miss rule */
        miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(miss->rule)) {
                mlx5_destroy_flow_group(miss->group);
                err = PTR_ERR(miss->rule);
                mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
                              err);
                goto out;
        }
out:
        kvfree(flow_group_in);
        kvfree(spec);
        return err;
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
        struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

        mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                       struct mlx5e_ipsec_rx *rx, u32 family)
{
        /* disconnect */
        if (rx != ipsec->rx_esw)
                ipsec_rx_ft_disconnect(ipsec, family);

        if (rx->chains) {
                ipsec_chains_destroy(rx->chains);
        } else {
                mlx5_del_flow_rules(rx->pol.rule);
                mlx5_destroy_flow_group(rx->pol.group);
                mlx5_destroy_flow_table(rx->ft.pol);
        }

        mlx5_del_flow_rules(rx->sa.rule);
        mlx5_destroy_flow_group(rx->sa.group);
        mlx5_destroy_flow_table(rx->ft.sa);
        if (rx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(mdev);
        mlx5_ipsec_rx_status_destroy(ipsec, rx);
        mlx5_destroy_flow_table(rx->ft.status);

        mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                     struct mlx5e_ipsec_rx *rx,
                                     u32 family,
                                     struct mlx5e_ipsec_rx_create_attr *attr)
{
        if (rx == ipsec->rx_esw) {
                /* For packet offload in switchdev mode, RX & TX use FDB namespace */
                attr->ns = ipsec->tx_esw->ns;
                mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
                return;
        }

        attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
        attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
        attr->family = family;
        attr->prio = MLX5E_NIC_PRIO;
        attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
        attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
        attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
        attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
                                         struct mlx5e_ipsec_rx *rx,
                                         struct mlx5e_ipsec_rx_create_attr *attr,
                                         struct mlx5_flow_destination *dest)
{
        struct mlx5_flow_table *ft;
        int err;

        if (rx == ipsec->rx_esw)
                return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

        *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
        err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
                                           attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
                                           attr->prio);
        if (err)
                return err;

        ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
        if (ft) {
                dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest->ft = ft;
        }

        return 0;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
                                struct mlx5e_ipsec_rx *rx,
                                struct mlx5e_ipsec_rx_create_attr *attr)
{
        struct mlx5_flow_destination dest = {};

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = rx->ft.pol;
        mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}

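/* Build the RX pipeline for @family: TTC steers ESP traffic to the
 * policy table (or policy chains), which misses to the SA table.
 * Packets matching an SA are decrypted and forwarded to the status
 * table, where they are counted and sent on to the default destination
 * (TTC or RoCE); SA table misses go straight to that destination.
 */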
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                     struct mlx5e_ipsec_rx *rx, u32 family)
{
        struct mlx5e_ipsec_rx_create_attr attr;
        struct mlx5_flow_destination dest[2];
        struct mlx5_flow_table *ft;
        u32 flags = 0;
        int err;

        ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

        err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
        if (err)
                return err;

        ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_fs_ft_status;
        }
        rx->ft.status = ft;

        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
        err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
        if (err)
                goto err_add;

        /* Create FT */
        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
                rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
        if (rx->allow_tunnel_mode)
                flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
        ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_fs_ft;
        }
        rx->ft.sa = ft;

        err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
        if (err)
                goto err_fs;

        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
                rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
                                                 attr.chains_ns,
                                                 attr.prio,
                                                 attr.pol_level,
                                                 &rx->ft.pol);
                if (IS_ERR(rx->chains)) {
                        err = PTR_ERR(rx->chains);
                        goto err_pol_ft;
                }

                goto connect;
        }

        ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_pol_ft;
        }
        rx->ft.pol = ft;
        memset(dest, 0x00, 2 * sizeof(*dest));
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[0].ft = rx->ft.sa;
        err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
        if (err)
                goto err_pol_miss;

connect:
        /* connect */
        if (rx != ipsec->rx_esw)
                ipsec_rx_ft_connect(ipsec, rx, &attr);
        return 0;

err_pol_miss:
        mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
        mlx5_del_flow_rules(rx->sa.rule);
        mlx5_destroy_flow_group(rx->sa.group);
err_fs:
        mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
        if (rx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(mdev);
        mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
        mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
        mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
        return err;
}

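/* The RX tree is created lazily on first use and reference-counted;
 * callers must hold rx->ft.mutex. Eswitch mode changes are blocked
 * while any RX tree exists.
 */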
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                  struct mlx5e_ipsec_rx *rx, u32 family)
{
        int err;

        if (rx->ft.refcnt)
                goto skip;

        err = mlx5_eswitch_block_mode(mdev);
        if (err)
                return err;

        err = rx_create(mdev, ipsec, rx, family);
        if (err) {
                mlx5_eswitch_unblock_mode(mdev);
                return err;
        }

skip:
        rx->ft.refcnt++;
        return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
                   u32 family)
{
        if (--rx->ft.refcnt)
                return;

        rx_destroy(ipsec->mdev, ipsec, rx, family);
        mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
                                        struct mlx5e_ipsec *ipsec, u32 family,
                                        int type)
{
        struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
        int err;

        mutex_lock(&rx->ft.mutex);
        err = rx_get(mdev, ipsec, rx, family);
        mutex_unlock(&rx->ft.mutex);
        if (err)
                return ERR_PTR(err);

        return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
                                                struct mlx5e_ipsec *ipsec,
                                                u32 family, u32 prio, int type)
{
        struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
        struct mlx5_flow_table *ft;
        int err;

        mutex_lock(&rx->ft.mutex);
        err = rx_get(mdev, ipsec, rx, family);
        if (err)
                goto err_get;

        ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_get_ft;
        }

        mutex_unlock(&rx->ft.mutex);
        return ft;

err_get_ft:
        rx_put(ipsec, rx, family);
err_get:
        mutex_unlock(&rx->ft.mutex);
        return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
        struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

        mutex_lock(&rx->ft.mutex);
        rx_put(ipsec, rx, family);
        mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
        struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

        mutex_lock(&rx->ft.mutex);
        if (rx->chains)
                ipsec_chains_put_table(rx->chains, prio);

        rx_put(ipsec, rx, family);
        mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *fte;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* create fte */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest.counter_id = mlx5_fc_id(tx->fc->cnt);
        fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
        if (IS_ERR(fte)) {
                err = PTR_ERR(fte);
                mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
                goto err_rule;
        }

        kvfree(spec);
        tx->status.rule = fte;
        return 0;

err_rule:
        kvfree(spec);
        return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
                       struct mlx5_ipsec_fs *roce)
{
        mlx5_ipsec_fs_roce_tx_destroy(roce);
        if (tx->chains) {
                ipsec_chains_destroy(tx->chains);
        } else {
                mlx5_del_flow_rules(tx->pol.rule);
                mlx5_destroy_flow_group(tx->pol.group);
                mlx5_destroy_flow_table(tx->ft.pol);
        }

        if (tx == ipsec->tx_esw) {
                mlx5_del_flow_rules(tx->sa.rule);
                mlx5_destroy_flow_group(tx->sa.group);
        }
        mlx5_destroy_flow_table(tx->ft.sa);
        if (tx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(ipsec->mdev);
        mlx5_del_flow_rules(tx->status.rule);
        mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                     struct mlx5e_ipsec_tx *tx,
                                     struct mlx5e_ipsec_tx_create_attr *attr)
{
        if (tx == ipsec->tx_esw) {
                mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
                return;
        }

        attr->prio = 0;
        attr->pol_level = 0;
        attr->sa_level = 1;
        attr->cnt_level = 2;
        attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

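/* Build the TX pipeline: the policy table (or policy chains) misses to
 * the SA table, and accepted packets are counted by the allow rule in
 * the status table. In switchdev mode the SA table also gets a miss
 * rule towards the uplink vport.
 */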
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
                     struct mlx5_ipsec_fs *roce)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_tx_create_attr attr;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_table *ft;
        u32 flags = 0;
        int err;

        ipsec_tx_create_attr_set(ipsec, tx, &attr);
        ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
        if (IS_ERR(ft))
                return PTR_ERR(ft);
        tx->ft.status = ft;

        err = ipsec_counter_rule_tx(mdev, tx);
        if (err)
                goto err_status_rule;

        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
                tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
        if (tx->allow_tunnel_mode)
                flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
        ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_sa_ft;
        }
        tx->ft.sa = ft;

        if (tx == ipsec->tx_esw) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest.vport.num = MLX5_VPORT_UPLINK;
                err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
                if (err)
                        goto err_sa_miss;
                memset(&dest, 0, sizeof(dest));
        }

        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
                tx->chains = ipsec_chains_create(
                        mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
                        &tx->ft.pol);
                if (IS_ERR(tx->chains)) {
                        err = PTR_ERR(tx->chains);
                        goto err_pol_ft;
                }

                goto connect_roce;
        }

        ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_pol_ft;
        }
        tx->ft.pol = ft;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = tx->ft.sa;
        err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
        if (err) {
                mlx5_destroy_flow_table(tx->ft.pol);
                goto err_pol_ft;
        }

connect_roce:
        err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
        if (err)
                goto err_roce;
        return 0;

err_roce:
        if (tx->chains) {
                ipsec_chains_destroy(tx->chains);
        } else {
                mlx5_del_flow_rules(tx->pol.rule);
                mlx5_destroy_flow_group(tx->pol.group);
                mlx5_destroy_flow_table(tx->ft.pol);
        }
err_pol_ft:
        if (tx == ipsec->tx_esw) {
                mlx5_del_flow_rules(tx->sa.rule);
                mlx5_destroy_flow_group(tx->sa.group);
        }
err_sa_miss:
        mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
        if (tx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(mdev);
        mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
        mlx5_destroy_flow_table(tx->ft.status);
        return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
                                       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5e_priv *priv;

        esw->offloads.ft_ipsec_tx_pol = ft;
        uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        priv = netdev_priv(uplink_rpriv->netdev);
        if (!priv->channels.num)
                return;

        mlx5e_rep_deactivate_channels(priv);
        mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                  struct mlx5e_ipsec_tx *tx)
{
        int err;

        if (tx->ft.refcnt)
                goto skip;

        err = mlx5_eswitch_block_mode(mdev);
        if (err)
                return err;

        err = tx_create(ipsec, tx, ipsec->roce);
        if (err) {
                mlx5_eswitch_unblock_mode(mdev);
                return err;
        }

        if (tx == ipsec->tx_esw)
                ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
        tx->ft.refcnt++;
        return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
        if (--tx->ft.refcnt)
                return;

        if (tx == ipsec->tx_esw) {
                mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
                ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
        }

        tx_destroy(ipsec, tx, ipsec->roce);
        mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
                                                struct mlx5e_ipsec *ipsec,
                                                u32 prio, int type)
{
        struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
        struct mlx5_flow_table *ft;
        int err;

        mutex_lock(&tx->ft.mutex);
        err = tx_get(mdev, ipsec, tx);
        if (err)
                goto err_get;

        ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_get_ft;
        }

        mutex_unlock(&tx->ft.mutex);
        return ft;

err_get_ft:
        tx_put(ipsec, tx);
err_get:
        mutex_unlock(&tx->ft.mutex);
        return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
                                        struct mlx5e_ipsec *ipsec, int type)
{
        struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
        int err;

        mutex_lock(&tx->ft.mutex);
        err = tx_get(mdev, ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
        if (err)
                return ERR_PTR(err);

        return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
        struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

        mutex_lock(&tx->ft.mutex);
        tx_put(ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
        struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

        mutex_lock(&tx->ft.mutex);
        if (tx->chains)
                ipsec_chains_put_table(tx->chains, prio);

        tx_put(ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
}

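/* setup_fte_* helpers fill in the match criteria/values shared by SA
 * and policy rules. All-zero addresses act as wildcards and leave the
 * spec untouched.
 */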
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        if (!*saddr && !*daddr)
                return;

        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

        if (*saddr) {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
        }

        if (*daddr) {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        }
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
                return;

        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

        if (!addr6_all_zero(saddr)) {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
                memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
        }

        if (!addr6_all_zero(daddr)) {
                memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
                memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
        }
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
        /* ESP header: ip_protocol lives in outer_headers, so enable the
         * outer-headers criteria (not misc_parameters) for this match.
         */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
        /* SPI number */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

        if (encap) {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 misc_parameters.inner_esp_spi);
                MLX5_SET(fte_match_param, spec->match_value,
                         misc_parameters.inner_esp_spi, spi);
        } else {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 misc_parameters.outer_esp_spi);
                MLX5_SET(fte_match_param, spec->match_value,
                         misc_parameters.outer_esp_spi, spi);
        }
}

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
        /* Non fragmented */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
        /* Add IPsec indicator in metadata_reg_a */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
{
        /* Pass policy check before choosing this SA */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         misc_parameters_2.metadata_reg_c_4);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_c_4, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
        switch (upspec->proto) {
        case IPPROTO_UDP:
                if (upspec->dport) {
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
                                 udp_dport, upspec->dport_mask);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 udp_dport, upspec->dport);
                }
                if (upspec->sport) {
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
                                 udp_sport, upspec->sport_mask);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 udp_sport, upspec->sport);
                }
                break;
        case IPPROTO_TCP:
                if (upspec->dport) {
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
                                 tcp_dport, upspec->dport_mask);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 tcp_dport, upspec->dport);
                }
                if (upspec->sport) {
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
                                 tcp_sport, upspec->sport_mask);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 tcp_sport, upspec->sport);
                }
                break;
        default:
                return;
        }

        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}

static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
                                                     int type, u8 dir)
{
        if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
                return MLX5_FLOW_NAMESPACE_FDB;

        if (dir == XFRM_DEV_OFFLOAD_IN)
                return MLX5_FLOW_NAMESPACE_KERNEL;

        return MLX5_FLOW_NAMESPACE_EGRESS;
}

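/* Stamp packets with SA/policy metadata using a modify-header action:
 * RX rules write an SA identifier into metadata reg B, while TX policy
 * rules write their reqid into metadata reg C4, which the TX SA rules
 * later match via setup_fte_reg_c4().
 */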
static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
                               struct mlx5_flow_act *flow_act)
{
        enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_modify_hdr *modify_hdr;

        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        switch (dir) {
        case XFRM_DEV_OFFLOAD_IN:
                MLX5_SET(set_action_in, action, field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_B);
                break;
        case XFRM_DEV_OFFLOAD_OUT:
                MLX5_SET(set_action_in, action, field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
                break;
        default:
                return -EINVAL;
        }

        MLX5_SET(set_action_in, action, data, val);
        MLX5_SET(set_action_in, action, offset, 0);
        MLX5_SET(set_action_in, action, length, 32);

        modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
        if (IS_ERR(modify_hdr)) {
                mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
                              PTR_ERR(modify_hdr));
                return PTR_ERR(modify_hdr);
        }

        flow_act->modify_hdr = modify_hdr;
        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        return 0;
}

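/* Tunnel mode reformat: RX uses L3_ESP_TUNNEL_TO_L2 to strip the outer
 * headers, while TX uses L2_TO_L3_ESP_TUNNEL with a header template
 * (MAC + outer IPv4/IPv6 + ESP carrying the SA's SPI) built below.
 */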
static int
setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
                          struct mlx5_accel_esp_xfrm_attrs *attrs,
                          struct mlx5_pkt_reformat_params *reformat_params)
{
        struct ip_esp_hdr *esp_hdr;
        struct ipv6hdr *ipv6hdr;
        struct ethhdr *eth_hdr;
        struct iphdr *iphdr;
        char *reformatbf;
        size_t bfflen;
        void *hdr;

        bfflen = sizeof(*eth_hdr);

        if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
                bfflen += sizeof(*esp_hdr) + 8;

                switch (attrs->family) {
                case AF_INET:
                        bfflen += sizeof(*iphdr);
                        break;
                case AF_INET6:
                        bfflen += sizeof(*ipv6hdr);
                        break;
                default:
                        return -EINVAL;
                }
        }

        reformatbf = kzalloc(bfflen, GFP_KERNEL);
        if (!reformatbf)
                return -ENOMEM;

        eth_hdr = (struct ethhdr *)reformatbf;
        switch (attrs->family) {
        case AF_INET:
                eth_hdr->h_proto = htons(ETH_P_IP);
                break;
        case AF_INET6:
                eth_hdr->h_proto = htons(ETH_P_IPV6);
                break;
        default:
                goto free_reformatbf;
        }

        ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
        ether_addr_copy(eth_hdr->h_source, attrs->smac);

        switch (attrs->dir) {
        case XFRM_DEV_OFFLOAD_IN:
                reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
                break;
        case XFRM_DEV_OFFLOAD_OUT:
                reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
                reformat_params->param_0 = attrs->authsize;

                hdr = reformatbf + sizeof(*eth_hdr);
                switch (attrs->family) {
                case AF_INET:
                        iphdr = (struct iphdr *)hdr;
                        memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
                        memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
                        iphdr->version = 4;
                        iphdr->ihl = 5;
                        iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
                        iphdr->protocol = IPPROTO_ESP;
                        hdr += sizeof(*iphdr);
                        break;
                case AF_INET6:
                        ipv6hdr = (struct ipv6hdr *)hdr;
                        memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
                        memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
                        ipv6hdr->nexthdr = IPPROTO_ESP;
                        ipv6hdr->version = 6;
                        ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
                        hdr += sizeof(*ipv6hdr);
                        break;
                default:
                        goto free_reformatbf;
                }

                esp_hdr = (struct ip_esp_hdr *)hdr;
                esp_hdr->spi = htonl(attrs->spi);
                break;
        default:
                goto free_reformatbf;
        }

        reformat_params->size = bfflen;
        reformat_params->data = reformatbf;
        return 0;

free_reformatbf:
        kfree(reformatbf);
        return -EINVAL;
}

static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        switch (attrs->dir) {
        case XFRM_DEV_OFFLOAD_IN:
                if (attrs->encap)
                        return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
                return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
        case XFRM_DEV_OFFLOAD_OUT:
                if (attrs->family == AF_INET) {
                        if (attrs->encap)
                                return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
                        return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
                }

                if (attrs->encap)
                        return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
                return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
        default:
                WARN_ON(true);
        }

        return -EINVAL;
}

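/* Transport mode reformat: RX only selects the reformat type, while TX
 * also carries the SPI (preceded by a UDP header when NAT-T
 * encapsulation is in use) as the header data for the device.
 */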
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
                             struct mlx5_pkt_reformat_params *reformat_params)
{
        struct udphdr *udphdr;
        char *reformatbf;
        size_t bfflen;
        __be32 spi;
        void *hdr;

        reformat_params->type = get_reformat_type(attrs);
        if (reformat_params->type < 0)
                return reformat_params->type;

        switch (attrs->dir) {
        case XFRM_DEV_OFFLOAD_IN:
                break;
        case XFRM_DEV_OFFLOAD_OUT:
                bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
                if (attrs->encap)
                        bfflen += sizeof(*udphdr);

                reformatbf = kzalloc(bfflen, GFP_KERNEL);
                if (!reformatbf)
                        return -ENOMEM;

                hdr = reformatbf;
                if (attrs->encap) {
                        udphdr = (struct udphdr *)reformatbf;
                        udphdr->source = attrs->sport;
                        udphdr->dest = attrs->dport;
                        hdr += sizeof(*udphdr);
                }

                /* convert to network format */
                spi = htonl(attrs->spi);
                memcpy(hdr, &spi, sizeof(spi));

                reformat_params->param_0 = attrs->authsize;
                reformat_params->size = bfflen;
                reformat_params->data = reformatbf;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
                              struct mlx5_accel_esp_xfrm_attrs *attrs,
                              struct mlx5_flow_act *flow_act)
{
        enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
                                                                attrs->dir);
        struct mlx5_pkt_reformat_params reformat_params = {};
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_pkt_reformat *pkt_reformat;
        int ret;

        switch (attrs->mode) {
        case XFRM_MODE_TRANSPORT:
                ret = setup_pkt_transport_reformat(attrs, &reformat_params);
                break;
        case XFRM_MODE_TUNNEL:
                ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        pkt_reformat =
                mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
        kfree(reformat_params.data);
        if (IS_ERR(pkt_reformat))
                return PTR_ERR(pkt_reformat);

        flow_act->pkt_reformat = pkt_reformat;
        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
        return 0;
}

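/* Install the steering rule for one RX SA: match on IP addresses, SPI
 * and optional upper-layer selectors, decrypt using the SA's IPsec
 * object, stamp the packet via modify-header, then count and forward
 * (or drop) towards the status table.
 */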
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_destination dest[2];
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5e_ipsec_rx *rx;
        struct mlx5_fc *counter;
        int err = 0;

        rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
        if (IS_ERR(rx))
                return PTR_ERR(rx);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_spi(spec, attrs->spi, attrs->encap);
        if (!attrs->encap)
                setup_fte_esp(spec);
        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        if (rx != ipsec->rx_esw)
                err = setup_modify_header(ipsec, attrs->type,
                                          sa_entry->ipsec_obj_id | BIT(31),
                                          XFRM_DEV_OFFLOAD_IN, &flow_act);
        else
                err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);

        if (err)
                goto err_mod_header;

        switch (attrs->type) {
        case XFRM_DEV_OFFLOAD_PACKET:
                err = setup_pkt_reformat(ipsec, attrs, &flow_act);
                if (err)
                        goto err_pkt_reformat;
                break;
        default:
                break;
        }

        counter = mlx5_fc_create(mdev, true);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_add_cnt;
        }
        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[0].ft = rx->ft.status;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(counter);
        rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
                goto err_add_flow;
        }
        kvfree(spec);

        sa_entry->ipsec_rule.rule = rule;
        sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
        sa_entry->ipsec_rule.fc = counter;
        sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
        return 0;

err_add_flow:
        mlx5_fc_destroy(mdev, counter);
err_add_cnt:
        if (flow_act.pkt_reformat)
                mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
        mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
        kvfree(spec);
err_alloc:
        rx_ft_put(ipsec, attrs->family, attrs->type);
        return err;
}

static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_destination dest[2];
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5e_ipsec_tx *tx;
        struct mlx5_fc *counter;
        int err;

        tx = tx_ft_get(mdev, ipsec, attrs->type);
        if (IS_ERR(tx))
                return PTR_ERR(tx);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        switch (attrs->type) {
        case XFRM_DEV_OFFLOAD_CRYPTO:
                setup_fte_spi(spec, attrs->spi, false);
                setup_fte_esp(spec);
                setup_fte_reg_a(spec);
                break;
        case XFRM_DEV_OFFLOAD_PACKET:
                if (attrs->reqid)
                        setup_fte_reg_c4(spec, attrs->reqid);
                err = setup_pkt_reformat(ipsec, attrs, &flow_act);
                if (err)
                        goto err_pkt_reformat;
                break;
        default:
                break;
        }

        counter = mlx5_fc_create(mdev, true);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_add_cnt;
        }

        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        dest[0].ft = tx->ft.status;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(counter);
        rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
                goto err_add_flow;
        }

        kvfree(spec);
        sa_entry->ipsec_rule.rule = rule;
        sa_entry->ipsec_rule.fc = counter;
        sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
        return 0;

err_add_flow:
        mlx5_fc_destroy(mdev, counter);
err_add_cnt:
        if (flow_act.pkt_reformat)
                mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
        kvfree(spec);
err_alloc:
        tx_ft_put(ipsec, attrs->type);
        return err;
}

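/* Install one TX policy rule: ALLOW forwards to the SA table (stamping
 * reqid into reg C4 when set), BLOCK drops and counts.
 */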
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
        struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
        struct mlx5e_ipsec_tx *tx;
        int err, dstn = 0;

        ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        tx = ipsec_tx(ipsec, attrs->type);
        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        switch (attrs->action) {
        case XFRM_POLICY_ALLOW:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                if (!attrs->reqid)
                        break;

                err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
                                          XFRM_DEV_OFFLOAD_OUT, &flow_act);
                if (err)
                        goto err_mod_header;
                break;
        case XFRM_POLICY_BLOCK:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
                dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
                dstn++;
                break;
        default:
                WARN_ON(true);
                err = -EINVAL;
                goto err_mod_header;
        }

        flow_act.flags |= FLOW_ACT_NO_APPEND;
        if (tx == ipsec->tx_esw && tx->chains)
                flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[dstn].ft = tx->ft.sa;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dstn++;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
                goto err_action;
        }

        kvfree(spec);
        pol_entry->ipsec_rule.rule = rule;
        pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
        return 0;

err_action:
        if (flow_act.modify_hdr)
                mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
        kvfree(spec);
err_alloc:
        tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
        return err;
}

static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
        struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
        struct mlx5_flow_destination dest[2];
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
        struct mlx5e_ipsec_rx *rx;
        int err, dstn = 0;

        ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
                              attrs->type);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        switch (attrs->action) {
        case XFRM_POLICY_ALLOW:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                break;
        case XFRM_POLICY_BLOCK:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
                dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
                dstn++;
                break;
        default:
                WARN_ON(true);
                err = -EINVAL;
                goto err_action;
        }

        flow_act.flags |= FLOW_ACT_NO_APPEND;
        if (rx == ipsec->rx_esw && rx->chains)
                flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[dstn].ft = rx->ft.sa;
        dstn++;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
                goto err_action;
        }

        kvfree(spec);
        pol_entry->ipsec_rule.rule = rule;
        return 0;

err_action:
        kvfree(spec);
err_alloc:
        rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
        return err;
}

static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
                                            struct mlx5e_ipsec_fc *fc)
{
        mlx5_fc_destroy(mdev, fc->drop);
        mlx5_fc_destroy(mdev, fc->cnt);
        kfree(fc);
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;

        ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
        ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
        if (ipsec->is_uplink_rep) {
                ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
                ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
        }
}

static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
{
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fc *counter;
        int err;

        fc = kzalloc(sizeof(*fc), GFP_KERNEL);
        if (!fc)
                return ERR_PTR(-ENOMEM);

        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_cnt;
        }
        fc->cnt = counter;

        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_drop;
        }
        fc->drop = counter;

        return fc;

err_drop:
        mlx5_fc_destroy(mdev, fc->cnt);
err_cnt:
        kfree(fc);
        return ERR_PTR(err);
}

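/* Allocate the packet/drop counter pairs read back by
 * mlx5e_accel_ipsec_fs_read_stats(). IPv4 and IPv6 RX share one pair;
 * the eswitch RX/TX contexts get their own pairs on the uplink rep.
 */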
ipsec_fs_init_counters(struct mlx5e_ipsec * ipsec)1726 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
1727 {
1728 struct mlx5_core_dev *mdev = ipsec->mdev;
1729 struct mlx5e_ipsec_fc *fc;
1730 int err;
1731
1732 fc = ipsec_fs_init_single_counter(mdev);
1733 if (IS_ERR(fc)) {
1734 err = PTR_ERR(fc);
1735 goto err_rx_cnt;
1736 }
1737 ipsec->rx_ipv4->fc = fc;
1738
1739 fc = ipsec_fs_init_single_counter(mdev);
1740 if (IS_ERR(fc)) {
1741 err = PTR_ERR(fc);
1742 goto err_tx_cnt;
1743 }
1744 ipsec->tx->fc = fc;
1745
1746 if (ipsec->is_uplink_rep) {
1747 fc = ipsec_fs_init_single_counter(mdev);
1748 if (IS_ERR(fc)) {
1749 err = PTR_ERR(fc);
1750 goto err_rx_esw_cnt;
1751 }
1752 ipsec->rx_esw->fc = fc;
1753
1754 fc = ipsec_fs_init_single_counter(mdev);
1755 if (IS_ERR(fc)) {
1756 err = PTR_ERR(fc);
1757 goto err_tx_esw_cnt;
1758 }
1759 ipsec->tx_esw->fc = fc;
1760 }
1761
1762 /* Both IPv4 and IPv6 point to same flow counters struct. */
1763 ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
1764 return 0;
1765
1766 err_tx_esw_cnt:
1767 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
1768 err_rx_esw_cnt:
1769 ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
1770 err_tx_cnt:
1771 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
1772 err_rx_cnt:
1773 return err;
1774 }
1775
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_hw_stats *stats;
	struct mlx5e_ipsec_fc *fc;
	u64 packets, bytes;

	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

	stats->ipsec_rx_pkts = 0;
	stats->ipsec_rx_bytes = 0;
	stats->ipsec_rx_drop_pkts = 0;
	stats->ipsec_rx_drop_bytes = 0;
	stats->ipsec_tx_pkts = 0;
	stats->ipsec_tx_bytes = 0;
	stats->ipsec_tx_drop_pkts = 0;
	stats->ipsec_tx_drop_bytes = 0;

	fc = ipsec->rx_ipv4->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
		      &stats->ipsec_rx_drop_bytes);

	fc = ipsec->tx->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
		      &stats->ipsec_tx_drop_bytes);

	if (ipsec->is_uplink_rep) {
		fc = ipsec->rx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_rx_pkts += packets;
			stats->ipsec_rx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_rx_drop_pkts += packets;
			stats->ipsec_rx_drop_bytes += bytes;
		}

		fc = ipsec->tx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_tx_pkts += packets;
			stats->ipsec_tx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_tx_drop_pkts += packets;
			stats->ipsec_tx_drop_bytes += bytes;
		}
	}
}

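/* IPsec packet offload and TC offload are mutually exclusive. Fail with
 * -EBUSY if TC already blocked IPsec (num_block_ipsec), otherwise bump
 * num_block_tc to keep TC from binding. With an eswitch present, the
 * check-and-increment is done under the eswitch lock so it cannot race
 * with eswitch mode changes; mlx5e_ipsec_unblock_tc_offload() drops the
 * reference.
 */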
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err = 0;

	if (esw) {
		err = mlx5_esw_lock(esw);
		if (err)
			return err;
	}

	if (mdev->num_block_ipsec) {
		err = -EBUSY;
		goto unlock;
	}

	mdev->num_block_tc++;

unlock:
	if (esw)
		mlx5_esw_unlock(esw);

	return err;
}
#else
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	if (mdev->num_block_ipsec)
		return -EBUSY;

	mdev->num_block_tc++;
	return 0;
}
#endif

static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
	mdev->num_block_tc--;
}

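/* Install the steering rule for an SA. Packet offload SAs must first
 * block TC offload; the block is dropped again if rule installation
 * fails. The rule goes to the TX or RX tables according to the SA
 * direction.
 */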
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
		if (err)
			return err;
	}

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_rule(sa_entry);
	else
		err = rx_add_rule(sa_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
	return err;
}

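/* Tear down an SA's steering: delete the flow rule, destroy its counter
 * and packet reformat, and release the TC block for packet offload SAs.
 * TX SAs then only drop their table reference; RX SAs also free the
 * modify header, remove the eswitch SA-id mapping and release the RX
 * tables.
 */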
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(mdev);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}

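/* Install the steering rule for a policy. Policies always take the TC
 * block (policy offload exists only for packet offload); the block is
 * dropped if rule installation fails.
 */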
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	int err;

	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
	if (err)
		return err;

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_policy(pol_entry);
	else
		err = rx_add_policy(pol_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
	return err;
}

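/* Remove a policy's flow rule, release the TC block and drop the policy
 * table reference; egress policies may also own a modify header that
 * has to be freed.
 */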
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
				 pol_entry->attrs.prio, pol_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}

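/* Undo mlx5e_accel_ipsec_fs_init(). The early return on !ipsec->tx
 * makes this safe to call when init never ran. The WARN_ON()s flag flow
 * table references that leaked past cleanup.
 */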
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (ipsec->roce)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);

	if (ipsec->is_uplink_rep) {
		xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);

		mutex_destroy(&ipsec->tx_esw->ft.mutex);
		WARN_ON(ipsec->tx_esw->ft.refcnt);
		kfree(ipsec->tx_esw);

		mutex_destroy(&ipsec->rx_esw->ft.mutex);
		WARN_ON(ipsec->rx_esw->ft.refcnt);
		kfree(ipsec->rx_esw);
	}
}

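/* Allocate the IPsec steering contexts. The egress-IPsec namespace is
 * mandatory; on the uplink representor the FDB namespace and the
 * eswitch TX/RX contexts are set up as well, along with the xarray
 * mapping IPsec object ids. RoCE-over-IPsec tables are created only on
 * non-representor devices that advertise the capability. The flow
 * tables themselves are created on demand, via the refcounted ft
 * get/put helpers guarded by the mutexes initialized here.
 */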
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns, *ns_esw;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	if (ipsec->is_uplink_rep) {
		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
		if (!ns_esw)
			return -EOPNOTSUPP;

		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
		if (!ipsec->tx_esw)
			return -ENOMEM;

		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
		if (!ipsec->rx_esw)
			goto err_rx_esw;
	}

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (ipsec->is_uplink_rep) {
		mutex_init(&ipsec->tx_esw->ft.mutex);
		mutex_init(&ipsec->rx_esw->ft.mutex);
		ipsec->tx_esw->ns = ns_esw;
		xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
	}

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
err_tx:
	kfree(ipsec->rx_esw);
err_rx_esw:
	kfree(ipsec->tx_esw);
	return err;
}

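/* Replace an SA's rules in make-before-break fashion: build the new
 * rules on a shadow copy first, then delete the old ones and commit the
 * shadow back into the entry. If adding the new rules fails, the
 * existing rules are left untouched.
 */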
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}

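/* Report whether the steering context matching this SA (TX or RX,
 * legacy or eswitch) was created with tunnel mode allowed.
 */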
bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5e_ipsec_tx *tx;

	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		return tx->allow_tunnel_mode;

	return rx->allow_tunnel_mode;
}
