// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"

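/* Both the per-protocol RX SA tables and the TX table are sized for up to
 * 32K (BIT(15)) flow table entries.
 */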
#define NUM_IPSEC_FTE BIT(15)

enum accel_fs_esp_type {
	ACCEL_FS_ESP4,
	ACCEL_FS_ESP6,
	ACCEL_FS_ESP_NUM_TYPES,
};

struct mlx5e_ipsec_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_esp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_ipsec_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};

struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

struct mlx5e_ipsec_tx {
	struct mlx5_flow_table *ft;
	struct mutex mutex; /* Protect IPsec TX steering */
	u32 refcnt;
};

/* IPsec RX flow steering */
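/*
 * RX steering topology, per IP version:
 *
 *   TTC ESP rule -> fs_prot->ft (per-SA decrypt rules; miss -> default dest)
 *                       |
 *                       v
 *                  rx_err.ft (copy ipsec_syndrome to regB[24:30])
 *                       |
 *                       v
 *                  default (TTC) destination
 */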
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
	if (i == ACCEL_FS_ESP4)
		return MLX5E_TT_IPV4_IPSEC_ESP;
	return MLX5E_TT_IPV6_IPSEC_ESP;
}

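/* Install the single catch-all rule of the RX error flow table: copy the
 * hardware ipsec_syndrome into metadata regB and forward the packet to the
 * protocol's default (TTC) destination.
 */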
static int rx_err_add_rule(struct mlx5e_priv *priv,
			   struct mlx5e_accel_fs_esp_prot *fs_prot,
			   struct mlx5e_ipsec_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
				  &fs_prot->default_dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	rx_err->rule = fte;
	rx_err->copy_modify_hdr = modify_hdr;

out:
	if (err)
		mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}

static void rx_err_del_rule(struct mlx5e_priv *priv,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	if (rx_err->rule) {
		mlx5_del_flow_rules(rx_err->rule);
		rx_err->rule = NULL;
	}

	if (rx_err->copy_modify_hdr) {
		mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
		rx_err->copy_modify_hdr = NULL;
	}
}

static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
{
	rx_err_del_rule(priv, rx_err);

	if (rx_err->ft) {
		mlx5_destroy_flow_table(rx_err->ft);
		rx_err->ft = NULL;
	}
}

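/* Create the one-entry RX error flow table and install its syndrome-copy
 * rule; on failure the table is destroyed again so the caller sees a clean
 * state.
 */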
static int rx_err_create_ft(struct mlx5e_priv *priv,
			    struct mlx5e_accel_fs_esp_prot *fs_prot,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
		return err;
	}

	rx_err->ft = ft;
	err = rx_err_add_rule(priv, fs_prot, rx_err);
	if (err)
		goto out_err;

	return 0;

out_err:
	mlx5_destroy_flow_table(ft);
	rx_err->ft = NULL;
	return err;
}

static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	if (fs_prot->miss_rule) {
		mlx5_del_flow_rules(fs_prot->miss_rule);
		fs_prot->miss_rule = NULL;
	}

	if (fs_prot->miss_group) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		fs_prot->miss_group = NULL;
	}

	if (fs_prot->ft) {
		mlx5_destroy_flow_table(fs_prot->ft);
		fs_prot->ft = NULL;
	}
}

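/* Create the per-protocol RX SA table. One slot at the end of the table is
 * reserved for the miss group, whose single rule forwards packets that
 * matched no SA to the default (TTC) destination.
 */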
static int rx_fs_create(struct mlx5e_priv *priv,
			struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create FT */
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
		goto out;
	}
	fs_prot->ft = ft;

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
		goto out;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
		goto out;
	}
	fs_prot->miss_rule = miss_rule;

out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;

	/* The netdev unregister already happened, so all offloaded rules have
	 * already been removed.
	 */
	fs_prot = &accel_esp->fs_prot[type];

	rx_fs_destroy(fs_prot);

	rx_err_destroy_ft(priv, &fs_prot->rx_err);

	return 0;
}

static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	int err;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];

	fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type));

	err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
	if (err)
		return err;

	err = rx_fs_create(priv, fs_prot);
	if (err)
		rx_destroy(priv, type);

	return err;
}

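/* Take a reference on the per-protocol RX tables. On first use the tables
 * are created and the matching TTC traffic type is redirected to them;
 * rx_ft_put() undoes both steps when the last reference goes away.
 */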
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt++)
		goto out;

	/* create FT */
	err = rx_create(priv, type);
	if (err) {
		fs_prot->refcnt--;
		goto out;
	}

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest);

out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (--fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type));

	/* remove FT */
	rx_destroy(priv, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

/* IPsec TX flow steering */
static int tx_create(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table *ft;
	int err;

	priv->fs.egress_ns =
		mlx5_get_flow_namespace(priv->mdev,
					MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
	if (!priv->fs.egress_ns)
		return -EOPNOTSUPP;

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
		return err;
	}
	ipsec->tx_fs->ft = ft;
	return 0;
}

static void tx_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
		return;

	mlx5_destroy_flow_table(ipsec->tx_fs->ft);
	ipsec->tx_fs->ft = NULL;
}

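/* tx_ft_get()/tx_ft_put() reference-count the single TX flow table so it
 * exists exactly while at least one TX rule is installed.
 */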
static int tx_ft_get(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt++)
		goto out;

	err = tx_create(priv);
	if (err) {
		tx_fs->refcnt--;
		goto out;
	}

out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

static void tx_ft_put(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;

	mutex_lock(&tx_fs->mutex);
	if (--tx_fs->refcnt)
		goto out;

	tx_destroy(priv);

out:
	mutex_unlock(&tx_fs->mutex);
}

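/* Build the match criteria shared by RX and TX rules: IP version,
 * non-fragmented traffic, the ESP protocol, and the SA's SPI and
 * source/destination addresses. The IPsec object ID is attached to the
 * flow action for the crypto offload.
 */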
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_act *flow_act)
{
	u8 ip_version = attrs->is_ipv6 ? 6 : 4;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

	/* ip_version */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);

	/* Non-fragmented */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);

	/* ESP header */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);

	/* SPI number */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
		 be32_to_cpu(attrs->spi));

	if (ip_version == 4) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &attrs->saddr.a4, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &attrs->daddr.a4, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &attrs->saddr.a6, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &attrs->daddr.a6, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
	}

	flow_act->ipsec_obj_id = ipsec_obj_id;
	flow_act->flags |= FLOW_ACT_NO_APPEND;
}

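/* Install the per-SA RX rule: match the SA, decrypt, mark the packet in
 * metadata regB (IPsec marker plus object ID) and forward it to the RX
 * error table, where the syndrome is copied for the datapath to inspect.
 */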
static int rx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	enum accel_fs_esp_type type;
	struct mlx5_flow_spec *spec;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
	fs_prot = &accel_esp->fs_prot[type];

	err = rx_ft_get(priv, type);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_err;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* regB: set bit[31] as the IPsec marker and bits[23:0] to
	 * ipsec_obj_id
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.modify_hdr = modify_hdr;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out_err;
	}

	ipsec_rule->rule = rule;
	ipsec_rule->set_modify_hdr = modify_hdr;
	goto out;

out_err:
	if (modify_hdr)
		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
	rx_ft_put(priv, type);

out:
	kvfree(spec);
	return err;
}

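/* Install the per-SA TX rule: match the SA together with the IPsec mark
 * that the TX datapath sets in metadata reg A, then allow the packet with
 * inline encryption.
 */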
static int tx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	err = tx_ft_get(priv);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out;
	}

	ipsec_rule->rule = rule;

out:
	kvfree(spec);
	if (err)
		tx_ft_put(priv);
	return err;
}

static void rx_del_rule(struct mlx5e_priv *priv,
			struct mlx5_accel_esp_xfrm_attrs *attrs,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;

	mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
	ipsec_rule->set_modify_hdr = NULL;

	rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

static void tx_del_rule(struct mlx5e_priv *priv,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;

	tx_ft_put(priv);
}

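/* Entry points from the IPsec offload core. DECRYPT attributes select the
 * RX tables; everything else is treated as an encrypt rule on the TX side.
 */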
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5_accel_esp_xfrm_attrs *attrs,
				  u32 ipsec_obj_id,
				  struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return -EOPNOTSUPP;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
	else
		return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5_accel_esp_xfrm_attrs *attrs,
				   struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		rx_del_rule(priv, attrs, ipsec_rule);
	else
		tx_del_rule(priv, ipsec_rule);
}

static void fs_cleanup_tx(struct mlx5e_priv *priv)
{
	mutex_destroy(&priv->ipsec->tx_fs->mutex);
	WARN_ON(priv->ipsec->tx_fs->refcnt);
	kfree(priv->ipsec->tx_fs);
	priv->ipsec->tx_fs = NULL;
}

static void fs_cleanup_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(priv->ipsec->rx_fs);
	priv->ipsec->rx_fs = NULL;
}

static int fs_init_tx(struct mlx5e_priv *priv)
{
	priv->ipsec->tx_fs =
		kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
	if (!priv->ipsec->tx_fs)
		return -ENOMEM;

	mutex_init(&priv->ipsec->tx_fs->mutex);
	return 0;
}

static int fs_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	priv->ipsec->rx_fs =
		kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
	if (!priv->ipsec->rx_fs)
		return -ENOMEM;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	return 0;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
{
	if (!priv->ipsec->rx_fs)
		return;

	fs_cleanup_tx(priv);
	fs_cleanup_rx(priv);
}

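/* Allocate the TX and RX steering contexts. The flow tables themselves are
 * created lazily, on the first rule, via tx_ft_get()/rx_ft_get().
 */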
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
		return -EOPNOTSUPP;

	err = fs_init_tx(priv);
	if (err)
		return err;

	err = fs_init_rx(priv);
	if (err)
		fs_cleanup_tx(priv);

	return err;
}