// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"

#define NUM_IPSEC_FTE BIT(15)

enum accel_fs_esp_type {
	ACCEL_FS_ESP4,
	ACCEL_FS_ESP6,
	ACCEL_FS_ESP_NUM_TYPES,
};

struct mlx5e_ipsec_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_esp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_ipsec_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};

struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

/* IPsec RX flow steering */
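/*
 * Overview: per IP version, ESP traffic is redirected from the TTC
 * (traffic type classifier) table into a SA flow table (fs_prot->ft)
 * that matches on SPI and addresses.  Matching packets are decrypted by
 * the hardware and forwarded to a one-rule "rx_err" table, which copies
 * the hardware IPsec syndrome into metadata register B before forwarding
 * to the default (TTC) destination.  Packets that match no SA hit the
 * miss rule and take the default destination unmodified.
 */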
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
	if (i == ACCEL_FS_ESP4)
		return MLX5E_TT_IPV4_IPSEC_ESP;
	return MLX5E_TT_IPV6_IPSEC_ESP;
}

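/*
 * Add the single catch-all rule of the rx_err table: copy the 7-bit
 * IPsec syndrome reported by the hardware into metadata register B
 * (regB[0:6]) and forward to the protocol's default destination.
 */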
static int rx_err_add_rule(struct mlx5e_priv *priv,
			   struct mlx5e_accel_fs_esp_prot *fs_prot,
			   struct mlx5e_ipsec_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[0:6] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 0);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "failed to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
				  &fs_prot->default_dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		netdev_err(priv->netdev, "failed to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	rx_err->rule = fte;
	rx_err->copy_modify_hdr = modify_hdr;

out:
	if (err)
		mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kfree(spec);
	return err;
}

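/* Tear down the rx_err rule and its modify header, in reverse creation order */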
static void rx_err_del_rule(struct mlx5e_priv *priv,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	if (rx_err->rule) {
		mlx5_del_flow_rules(rx_err->rule);
		rx_err->rule = NULL;
	}

	if (rx_err->copy_modify_hdr) {
		mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
		rx_err->copy_modify_hdr = NULL;
	}
}

static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
{
	rx_err_del_rule(priv, rx_err);

	if (rx_err->ft) {
		mlx5_destroy_flow_table(rx_err->ft);
		rx_err->ft = NULL;
	}
}

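/*
 * Create the single-entry, auto-grouped rx_err flow table and populate
 * its syndrome-copy rule.  Decrypted packets pass through this table on
 * their way back to the regular RX steering flow.
 */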
static int rx_err_create_ft(struct mlx5e_priv *priv,
			    struct mlx5e_accel_fs_esp_prot *fs_prot,
			    struct mlx5e_ipsec_rx_err *rx_err)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "failed to create ipsec rx inline ft err=%d\n", err);
		return err;
	}

	rx_err->ft = ft;
	err = rx_err_add_rule(priv, fs_prot, rx_err);
	if (err)
		goto out_err;

	return 0;

out_err:
	mlx5_destroy_flow_table(ft);
	rx_err->ft = NULL;
	return err;
}

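/* Destroy the SA flow table: miss rule first, then its group, then the table */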
static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	if (fs_prot->miss_rule) {
		mlx5_del_flow_rules(fs_prot->miss_rule);
		fs_prot->miss_rule = NULL;
	}

	if (fs_prot->miss_group) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		fs_prot->miss_group = NULL;
	}

	if (fs_prot->ft) {
		mlx5_destroy_flow_table(fs_prot->ft);
		fs_prot->ft = NULL;
	}
}

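/*
 * Create the per-protocol SA flow table.  The last entry is reserved for
 * an explicit miss group holding a single empty-match rule that forwards
 * unmatched ESP packets to the default (TTC) destination; SA rules are
 * auto-grouped into the remaining entries.
 */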
static int rx_fs_create(struct mlx5e_priv *priv,
			struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create FT */
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "failed to create ipsec rx ft err=%d\n", err);
		goto out;
	}
	fs_prot->ft = ft;

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		netdev_err(priv->netdev, "failed to create ipsec rx miss_group err=%d\n", err);
		goto out;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		netdev_err(priv->netdev, "failed to create ipsec rx miss_rule err=%d\n", err);
		goto out;
	}
	fs_prot->miss_rule = miss_rule;

out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;

	/* The netdev unregistration already happened, so all offloaded rules are already removed */
	fs_prot = &accel_esp->fs_prot[type];

	rx_fs_destroy(fs_prot);

	rx_err_destroy_ft(priv, &fs_prot->rx_err);

	return 0;
}

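/*
 * Build the full RX pipeline for one protocol: resolve the TTC default
 * destination, create the rx_err table first (the SA table forwards into
 * it), then the SA table itself.  On failure, rx_destroy() unwinds both.
 */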
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	int err;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];

	fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type));

	err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
	if (err)
		return err;

	err = rx_fs_create(priv, fs_prot);
	if (err)
		rx_destroy(priv, type);

	return err;
}

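/*
 * Take a reference on the per-protocol tables, creating them on first use
 * and redirecting the matching TTC traffic type into the SA flow table.
 * Serialized against concurrent get/put by prot_mutex.
 */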
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt++)
		goto out;

	/* create FT */
	err = rx_create(priv, type);
	if (err) {
		fs_prot->refcnt--;
		goto out;
	}

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest);

out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

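/*
 * Drop a reference; on the last put, restore the TTC default destination
 * before destroying the tables so no traffic points at a dead table.
 */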
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (--fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type));

	/* remove FT */
	rx_destroy(priv, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

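/*
 * Fill the flow spec shared by all SA rules: match non-fragmented ESP
 * packets of the right IP version on SPI and source/destination address,
 * and attach the IPsec object ID to the flow action.  FLOW_ACT_NO_APPEND
 * keeps each SA rule a standalone entry instead of appending a
 * destination to an existing one.
 */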
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_act *flow_act)
{
	u8 ip_version = attrs->is_ipv6 ? 6 : 4;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

	/* ip_version */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);

	/* Non-fragmented */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);

	/* ESP header */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);

	/* SPI number */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
		 be32_to_cpu(attrs->spi));

	if (ip_version == 4) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &attrs->saddr.a4, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &attrs->daddr.a4, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &attrs->saddr.a6, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &attrs->daddr.a6, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
	}

	flow_act->ipsec_obj_id = ipsec_obj_id;
	flow_act->flags |= FLOW_ACT_NO_APPEND;
}

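/*
 * Install the per-SA decrypt rule.  Combined with the syndrome copy done
 * in the rx_err table, metadata register B ends up laid out as:
 *   regB[0:6]  - 7-bit IPsec syndrome (copied after decryption)
 *   regB[7]    - IPsec marker, always set for offloaded traffic
 *   regB[8:31] - 24-bit ipsec_obj_id identifying the SA
 * so the RX datapath can match the packet back to its SA and detect
 * decryption failures.
 */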
static int rx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	enum accel_fs_esp_type type;
	struct mlx5_flow_spec *spec;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
	fs_prot = &accel_esp->fs_prot[type];

	err = rx_ft_get(priv, type);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_err;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Set the 1-bit ipsec marker and the 24-bit ipsec_obj_id in regB[7:31] */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
	MLX5_SET(set_action_in, action, offset, 7);
	MLX5_SET(set_action_in, action, length, 25);

	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "failed to alloc ipsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.modify_hdr = modify_hdr;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "failed to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out_err;
	}

	ipsec_rule->rule = rule;
	ipsec_rule->set_modify_hdr = modify_hdr;
	goto out;

out_err:
	if (modify_hdr)
		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
	rx_ft_put(priv, type);

out:
	kvfree(spec);
	return err;
}

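/* Remove a per-SA rule and drop its reference on the per-protocol tables */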
static void rx_del_rule(struct mlx5e_priv *priv,
			struct mlx5_accel_esp_xfrm_attrs *attrs,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;

	mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
	ipsec_rule->set_modify_hdr = NULL;

	rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

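/*
 * Entry point used when offloading an xfrm state: only RX (decrypt) rules
 * are supported by this flow-steering implementation, so anything else is
 * rejected with -EOPNOTSUPP.
 */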
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5_accel_esp_xfrm_attrs *attrs,
				  u32 ipsec_obj_id,
				  struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
		return -EOPNOTSUPP;

	return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5_accel_esp_xfrm_attrs *attrs,
				   struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return;

	rx_del_rule(priv, attrs, ipsec_rule);
}

static void fs_cleanup_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(priv->ipsec->rx_fs);
	priv->ipsec->rx_fs = NULL;
}

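/* Allocate the RX flow-steering context and init the per-protocol mutexes */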
static int fs_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	priv->ipsec->rx_fs =
		kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
	if (!priv->ipsec->rx_fs)
		return -ENOMEM;

	accel_esp = priv->ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	return 0;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
{
	if (!priv->ipsec->rx_fs)
		return;

	fs_cleanup_rx(priv);
}

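/*
 * The flow tables themselves are created lazily on first SA (rx_ft_get);
 * init only allocates the context, and only on devices with full IPsec
 * offload support.
 */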
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
	if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
		return -EOPNOTSUPP;

	return fs_init_rx(priv);
}