// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"

#define NUM_IPSEC_FTE BIT(15)

enum accel_fs_esp_type {
	ACCEL_FS_ESP4,
	ACCEL_FS_ESP6,
	ACCEL_FS_ESP_NUM_TYPES,
};

struct mlx5e_ipsec_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_esp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_ipsec_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};

struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

struct mlx5e_ipsec_tx {
	struct mlx5_flow_table *ft;
	struct mutex mutex; /* Protect IPsec TX steering */
	u32 refcnt;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
	if (i == ACCEL_FS_ESP4)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

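/*
 * The RX error flow table holds a single catch-all rule that copies the
 * 7-bit IPsec syndrome reported by the hardware into metadata register B
 * (bits 30:24) and then forwards the packet to the default TTC
 * destination.  The RX datapath is then expected to read the syndrome
 * back (presumably via the CQE metadata) to decide whether decryption
 * succeeded.
 */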
static int rx_err_add_rule(struct mlx5e_priv *priv,
			   struct mlx5e_accel_fs_esp_prot *fs_prot,
			   struct mlx5e_ipsec_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[30:24] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* Create FTE */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
				  &fs_prot->default_dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	rx_err->rule = fte;
	rx_err->copy_modify_hdr = modify_hdr;

out:
	if (err)
		mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}

static void rx_err_del_rule(struct mlx5e_priv *priv,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	if (rx_err->rule) {
		mlx5_del_flow_rules(rx_err->rule);
		rx_err->rule = NULL;
	}

	if (rx_err->copy_modify_hdr) {
		mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
		rx_err->copy_modify_hdr = NULL;
	}
}

static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
{
	rx_err_del_rule(priv, rx_err);

	if (rx_err->ft) {
		mlx5_destroy_flow_table(rx_err->ft);
		rx_err->ft = NULL;
	}
}

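/*
 * The error table needs only one FTE (the syndrome-copy rule above), so
 * it is created with max_fte = 1 at its own level within the NIC
 * priority.
 */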
static int rx_err_create_ft(struct mlx5e_priv *priv,
			    struct mlx5e_accel_fs_esp_prot *fs_prot,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
		return err;
	}

	rx_err->ft = ft;
	err = rx_err_add_rule(priv, fs_prot, rx_err);
	if (err)
		goto out_err;

	return 0;

out_err:
	mlx5_destroy_flow_table(ft);
	rx_err->ft = NULL;
	return err;
}

static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	if (fs_prot->miss_rule) {
		mlx5_del_flow_rules(fs_prot->miss_rule);
		fs_prot->miss_rule = NULL;
	}

	if (fs_prot->miss_group) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		fs_prot->miss_group = NULL;
	}

	if (fs_prot->ft) {
		mlx5_destroy_flow_table(fs_prot->ft);
		fs_prot->ft = NULL;
	}
}

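/*
 * The RX SA table is auto-grouped with one reserved entry at the very
 * end (ft->max_fte - 1).  That slot gets its own flow group and a
 * catch-all miss rule, so traffic that matches no offloaded SA falls
 * through to the default TTC destination untouched.
 */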
static int rx_fs_create(struct mlx5e_priv *priv,
			struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create FT */
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
		goto out;
	}
	fs_prot->ft = ft;

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
		goto out;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
		goto out;
	}
	fs_prot->miss_rule = miss_rule;

out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;

	/* The netdev unreg already happened, so all offloaded rules have
	 * already been removed.
	 */
	fs_prot = &accel_esp->fs_prot[type];

	rx_fs_destroy(fs_prot);

	rx_err_destroy_ft(priv, &fs_prot->rx_err);

	return 0;
}

static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	int err;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];

	fs_prot->default_dest =
		mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));

	err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
	if (err)
		return err;

	err = rx_fs_create(priv, fs_prot);
	if (err)
		rx_destroy(priv, type);

	return err;
}

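/*
 * rx_ft_get()/rx_ft_put() reference-count the per-protocol RX tables:
 * the first SA of a given type builds the tables and redirects the
 * matching TTC traffic type to them; when the last SA goes away the
 * default TTC destination is restored and the tables are torn down.
 */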
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt++)
		goto out;

	/* Create FT */
	err = rx_create(priv, type);
	if (err) {
		fs_prot->refcnt--;
		goto out;
	}

	/* Connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);

out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (--fs_prot->refcnt)
		goto out;

	/* Disconnect */
	mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));

	/* Remove FT */
	rx_destroy(priv, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

/* IPsec TX flow steering */
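/*
 * TX rules live in a single table in the egress kernel namespace.  The
 * table is created lazily on first use and reference-counted the same
 * way as the RX side.
 */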
static int tx_create(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table *ft;
	int err;

	priv->fs.egress_ns =
		mlx5_get_flow_namespace(priv->mdev,
					MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
	if (!priv->fs.egress_ns)
		return -EOPNOTSUPP;

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
		return err;
	}
	ipsec->tx_fs->ft = ft;
	return 0;
}

static void tx_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
		return;

	mlx5_destroy_flow_table(ipsec->tx_fs->ft);
	ipsec->tx_fs->ft = NULL;
}

static int tx_ft_get(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt++)
		goto out;

	err = tx_create(priv);
	if (err) {
		tx_fs->refcnt--;
		goto out;
	}

out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

static void tx_ft_put(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;

	mutex_lock(&tx_fs->mutex);
	if (--tx_fs->refcnt)
		goto out;

	tx_destroy(priv);

out:
	mutex_unlock(&tx_fs->mutex);
}

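/*
 * Build the match key shared by RX and TX SA rules: IP version,
 * non-fragmented packets only, ip_protocol == ESP, the SA's SPI, and
 * the SA's exact source/destination addresses.
 */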
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_act *flow_act)
{
	u8 ip_version = attrs->is_ipv6 ? 6 : 4;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

	/* ip_version */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);

	/* Non-fragmented */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);

	/* ESP header */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);

	/* SPI number */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
		 be32_to_cpu(attrs->spi));

	if (ip_version == 4) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &attrs->saddr.a4, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &attrs->daddr.a4, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &attrs->saddr.a6, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &attrs->daddr.a6, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
	}

	flow_act->ipsec_obj_id = ipsec_obj_id;
	flow_act->flags |= FLOW_ACT_NO_APPEND;
}

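/*
 * An RX SA rule decrypts matching ESP packets, stamps metadata register
 * B with an IPsec marker (bit 31) plus the ipsec_obj_id (bits 23:0),
 * and forwards them to the error table, where the decryption syndrome
 * is copied in as well before the packet continues to the default TTC
 * destination.
 */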
static int rx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	enum accel_fs_esp_type type;
	struct mlx5_flow_spec *spec;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
	fs_prot = &accel_esp->fs_prot[type];

	err = rx_ft_get(priv, type);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_err;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Set bit 31 as the IPsec marker and bits [23:0] to ipsec_obj_id */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.modify_hdr = modify_hdr;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out_err;
	}

	ipsec_rule->rule = rule;
	ipsec_rule->set_modify_hdr = modify_hdr;
	goto out;

out_err:
	if (modify_hdr)
		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
	rx_ft_put(priv, type);

out:
	kvfree(spec);
	return err;
}

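/*
 * A TX SA rule matches packets that the transmit path marked for IPsec
 * in the send WQE (MLX5_ETH_WQE_FT_META_IPSEC in metadata register A)
 * and lets the hardware encrypt them on the way out.
 */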
static int tx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	err = tx_ft_get(priv);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out;
	}

	ipsec_rule->rule = rule;

out:
	kvfree(spec);
	if (err)
		tx_ft_put(priv);
	return err;
}

static void rx_del_rule(struct mlx5e_priv *priv,
			struct mlx5_accel_esp_xfrm_attrs *attrs,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;

	mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
	ipsec_rule->set_modify_hdr = NULL;

	rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

static void tx_del_rule(struct mlx5e_priv *priv,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;

	tx_ft_put(priv);
}

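/*
 * Entry points used by the IPsec offload code: DECRYPT SAs get an RX
 * rule, anything else a TX rule.  A minimal usage sketch, assuming a
 * caller that has already created the hardware IPsec object for the SA
 * (the surrounding variable and label names are illustrative only):
 *
 *	err = mlx5e_accel_ipsec_fs_add_rule(priv, &attrs, ipsec_obj_id,
 *					    &sa_entry->ipsec_rule);
 *	if (err)
 *		goto err_hw_ctx;	// hypothetical unwind label
 *	...
 *	mlx5e_accel_ipsec_fs_del_rule(priv, &attrs, &sa_entry->ipsec_rule);
 */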
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5_accel_esp_xfrm_attrs *attrs,
				  u32 ipsec_obj_id,
				  struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return -EOPNOTSUPP;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);

	return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5_accel_esp_xfrm_attrs *attrs,
				   struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		rx_del_rule(priv, attrs, ipsec_rule);
	else
		tx_del_rule(priv, ipsec_rule);
}

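/*
 * Init allocates the TX context first and the RX context second;
 * cleanup keys off rx_fs alone because a non-NULL rx_fs implies the TX
 * context was set up successfully as well.
 */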
static void fs_cleanup_tx(struct mlx5e_priv *priv)
{
	mutex_destroy(&priv->ipsec->tx_fs->mutex);
	WARN_ON(priv->ipsec->tx_fs->refcnt);
	kfree(priv->ipsec->tx_fs);
	priv->ipsec->tx_fs = NULL;
}

static void fs_cleanup_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(priv->ipsec->rx_fs);
	priv->ipsec->rx_fs = NULL;
}

static int fs_init_tx(struct mlx5e_priv *priv)
{
	priv->ipsec->tx_fs =
		kzalloc(sizeof(*priv->ipsec->tx_fs), GFP_KERNEL);
	if (!priv->ipsec->tx_fs)
		return -ENOMEM;

	mutex_init(&priv->ipsec->tx_fs->mutex);
	return 0;
}

static int fs_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	priv->ipsec->rx_fs =
		kzalloc(sizeof(*priv->ipsec->rx_fs), GFP_KERNEL);
	if (!priv->ipsec->rx_fs)
		return -ENOMEM;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	return 0;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
{
	if (!priv->ipsec->rx_fs)
		return;

	fs_cleanup_tx(priv);
	fs_cleanup_rx(priv);
}

int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
		return -EOPNOTSUPP;

	err = fs_init_tx(priv);
	if (err)
		return err;

	err = fs_init_rx(priv);
	if (err)
		fs_cleanup_tx(priv);

	return err;
}