// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"

#define NUM_IPSEC_FTE BIT(15)

enum accel_fs_esp_type {
	ACCEL_FS_ESP4,
	ACCEL_FS_ESP6,
	ACCEL_FS_ESP_NUM_TYPES,
};

struct mlx5e_ipsec_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_esp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_ipsec_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};

struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

struct mlx5e_ipsec_tx {
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mutex mutex; /* Protect IPsec TX steering */
	u32 refcnt;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
	if (i == ACCEL_FS_ESP4)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

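/* Add the catch-all rule of the RX error-check table: copy the hardware
 * IPsec syndrome into metadata register B so the datapath can check the
 * decryption result, then forward to the default (TTC) destination.
 */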
static int rx_err_add_rule(struct mlx5e_priv *priv,
			   struct mlx5e_accel_fs_esp_prot *fs_prot,
			   struct mlx5e_ipsec_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "failed to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* Create the FTE: apply the copy action, then forward to the
	 * default destination.
	 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
				  &fs_prot->default_dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		netdev_err(priv->netdev, "failed to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx_err->rule = fte;
	rx_err->copy_modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}

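/* Populate the RX SA table with its catch-all pieces: a one-entry miss
 * group at the very end of the table, and a miss rule in it that sends
 * traffic which matched no SA to the default destination.
 */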
static int rx_fs_create(struct mlx5e_priv *priv,
			struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = fs_prot->ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create the miss group, occupying the last entry of the table */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		netdev_err(priv->netdev, "failed to create ipsec rx miss_group err=%d\n", err);
		goto out;
	}
	fs_prot->miss_group = miss_group;

	/* Create the miss rule: an empty spec matches everything */
	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(miss_rule)) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		err = PTR_ERR(miss_rule);
		netdev_err(priv->netdev, "failed to create ipsec rx miss_rule err=%d\n", err);
		goto out;
	}
	fs_prot->miss_rule = miss_rule;
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

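/* Tear down the RX steering objects of one ESP type, in reverse order
 * of creation.
 */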
static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;

	/* The netdev unregister has already happened, so all offloaded
	 * rules have already been removed.
	 */
	fs_prot = &accel_esp->fs_prot[type];

	mlx5_del_flow_rules(fs_prot->miss_rule);
	mlx5_destroy_flow_group(fs_prot->miss_group);
	mlx5_destroy_flow_table(fs_prot->ft);

	mlx5_del_flow_rules(fs_prot->rx_err.rule);
	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
}

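/* Create the RX steering topology of one ESP type: a small error-check
 * table holding the syndrome-copy rule, and the main SA table that
 * forwards decrypted packets to it.
 */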
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_table *ft;
	int err;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];

	fs_prot->default_dest =
		mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));

	/* Create the error-check table, with a single FTE */
	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	fs_prot->rx_err.ft = ft;
	err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
	if (err)
		goto err_add;

	/* Create the SA table, reserving the last entry for the miss rule */
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	fs_prot->ft = ft;

	err = rx_fs_create(priv, fs_prot);
	if (err)
		goto err_fs;

	return 0;

err_fs:
	mlx5_destroy_flow_table(fs_prot->ft);
err_fs_ft:
	mlx5_del_flow_rules(fs_prot->rx_err.rule);
	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
err_add:
	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
	return err;
}

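/* Refcounted get: create the RX tables on first use and point the TTC
 * ESP traffic type at them.
 */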
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt)
		goto skip;

	/* create FT */
	err = rx_create(priv, type);
	if (err)
		goto out;

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);

skip:
	fs_prot->refcnt++;
out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

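/* Refcounted put: on the last user, restore the default TTC destination
 * and destroy the RX tables.
 */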
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	fs_prot->refcnt--;
	if (fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));

	/* remove FT */
	rx_destroy(priv, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

/* IPsec TX flow steering */
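/* Create the egress flow table used for TX IPsec crypto offload rules. */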
static int tx_create(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "failed to create ipsec tx ft err=%d\n", err);
		return err;
	}
	ipsec->tx_fs->ft = ft;
	return 0;
}

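/* Refcounted get: lazily create the TX flow table on first use. */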
static int tx_ft_get(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt)
		goto skip;

	err = tx_create(priv);
	if (err)
		goto out;
skip:
	tx_fs->refcnt++;
out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

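/* Refcounted put: destroy the TX flow table when the last rule is gone. */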
static void tx_ft_put(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;

	mutex_lock(&tx_fs->mutex);
	tx_fs->refcnt--;
	if (tx_fs->refcnt)
		goto out;

	mlx5_destroy_flow_table(tx_fs->ft);
out:
	mutex_unlock(&tx_fs->mutex);
}

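/* Build the match spec shared by RX and TX SA rules: non-fragmented ESP
 * packets of the SA's IP version, matched on the SPI and on the SA's
 * source/destination addresses.
 */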
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_act *flow_act)
{
	u8 ip_version = attrs->is_ipv6 ? 6 : 4;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

	/* IP version */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);

	/* Non-fragmented */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);

	/* ESP header */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);

	/* SPI number */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters.outer_esp_spi, attrs->spi);

	if (ip_version == 4) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &attrs->saddr.a4, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &attrs->daddr.a4, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &attrs->saddr.a6, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &attrs->daddr.a6, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
	}

	flow_act->ipsec_obj_id = ipsec_obj_id;
	flow_act->flags |= FLOW_ACT_NO_APPEND;
}

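/* Add the RX rule of one SA: match the SA's addresses and SPI, decrypt,
 * tag metadata register B with the IPsec marker bit and the object ID,
 * and forward to the error-check table.
 */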
static int rx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5e_ipsec_sa_entry *sa_entry)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	enum accel_fs_esp_type type;
	struct mlx5_flow_spec *spec;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
	fs_prot = &accel_esp->fs_prot[type];

	err = rx_ft_get(priv, type);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_err;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Set bit[31] ipsec marker and bits[23:0] ipsec_obj_id in regB */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "failed to alloc ipsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.modify_hdr = modify_hdr;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "failed to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out_err;
	}

	ipsec_rule->rule = rule;
	ipsec_rule->set_modify_hdr = modify_hdr;
	goto out;

out_err:
	if (modify_hdr)
		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
	rx_ft_put(priv, type);

out:
	kvfree(spec);
	return err;
}

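/* Add the TX rule of one SA: match packets that the driver marked for
 * IPsec in metadata register A, then allow them with encryption.
 */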
static int tx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	err = tx_ft_get(priv);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
			 &flow_act);

	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "failed to add ipsec rule attrs->action=0x%x, err=%d\n",
			   sa_entry->attrs.action, err);
		goto out;
	}

	sa_entry->ipsec_rule.rule = rule;

out:
	kvfree(spec);
	if (err)
		tx_ft_put(priv);
	return err;
}

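/* Install the steering rule of an SA; the SA's action selects the TX
 * (encrypt) or RX (decrypt) path.
 */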
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		return tx_add_rule(priv, sa_entry);

	return rx_add_rule(priv, sa_entry);
}

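/* Remove the rule added by mlx5e_accel_ipsec_fs_add_rule() and drop the
 * corresponding flow table reference.
 */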
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
		tx_ft_put(priv);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
	rx_ft_put(priv,
		  sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

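/* Free the RX/TX steering bookkeeping; the flow tables themselves are
 * refcounted and are expected to be gone by now, hence the refcount
 * WARN_ONs.
 */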
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	if (!ipsec->rx_fs)
		return;

	mutex_destroy(&ipsec->tx_fs->mutex);
	WARN_ON(ipsec->tx_fs->refcnt);
	kfree(ipsec->tx_fs);

	accel_esp = ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(ipsec->rx_fs);
}

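/* Allocate the RX/TX steering bookkeeping and initialize the locks.
 * The actual flow tables are created lazily when the first SA is added.
 */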
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_namespace *ns;
	enum accel_fs_esp_type i;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
	if (!ns)
		return -EOPNOTSUPP;

	ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
	if (!ipsec->tx_fs)
		return -ENOMEM;

	ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
	if (!ipsec->rx_fs)
		goto err_rx;

	mutex_init(&ipsec->tx_fs->mutex);
	ipsec->tx_fs->ns = ns;

	accel_esp = ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	return 0;

err_rx:
	kfree(ipsec->tx_fs);
	return err;
}