1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <net/macsec.h>
5 #include <linux/mlx5/qp.h>
6 #include <linux/if_vlan.h>
7 #include <linux/mlx5/fs_helpers.h>
8 #include <linux/mlx5/macsec.h>
9 #include "fs_core.h"
10 #include "lib/macsec_fs.h"
11 #include "mlx5_core.h"
12
13 /* MACsec TX flow steering */
14 #define CRYPTO_NUM_MAXSEC_FTE BIT(15)
15 #define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
16
17 #define TX_CRYPTO_TABLE_LEVEL 0
18 #define TX_CRYPTO_TABLE_NUM_GROUPS 3
19 #define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
20 #define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
21 (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
22 CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
23 #define TX_CHECK_TABLE_LEVEL 1
24 #define TX_CHECK_TABLE_NUM_FTE 2
25 #define RX_CRYPTO_TABLE_LEVEL 0
26 #define RX_CHECK_TABLE_LEVEL 1
27 #define RX_ROCE_TABLE_LEVEL 2
28 #define RX_CHECK_TABLE_NUM_FTE 3
29 #define RX_ROCE_TABLE_NUM_FTE 2
30 #define RX_CRYPTO_TABLE_NUM_GROUPS 3
31 #define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
32 ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
33 #define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
34 (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
35 #define RX_NUM_OF_RULES_PER_SA 2
36
37 #define RDMA_RX_ROCE_IP_TABLE_LEVEL 0
38 #define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1
39
40 #define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
41 #define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
42 #define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
43 #define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
44 #define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
45 #define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
46 #define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
47
48 /* MACsec RX flow steering */
49 #define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
50
51 /* MACsec fs_id handling for steering */
52 #define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
53 #define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
54
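/* On-the-wire SecTAG layout, used to build the TX packet-reformat header. */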
55 struct mlx5_sectag_header {
56 __be16 ethertype;
57 u8 tci_an;
58 u8 sl;
59 u32 pn;
60 u8 sci[MACSEC_SCI_LEN]; /* optional */
61 } __packed;
62
63 struct mlx5_roce_macsec_tx_rule {
64 u32 fs_id;
65 u16 gid_idx;
66 struct list_head entry;
67 struct mlx5_flow_handle *rule;
68 struct mlx5_modify_hdr *meta_modhdr;
69 };
70
71 struct mlx5_macsec_tx_rule {
72 struct mlx5_flow_handle *rule;
73 struct mlx5_pkt_reformat *pkt_reformat;
74 u32 fs_id;
75 };
76
77 struct mlx5_macsec_flow_table {
78 int num_groups;
79 struct mlx5_flow_table *t;
80 struct mlx5_flow_group **g;
81 };
82
83 struct mlx5_macsec_tables {
84 struct mlx5_macsec_flow_table ft_crypto;
85 struct mlx5_flow_handle *crypto_miss_rule;
86
87 struct mlx5_flow_table *ft_check;
88 struct mlx5_flow_group *ft_check_group;
89 struct mlx5_fc *check_miss_rule_counter;
90 struct mlx5_flow_handle *check_miss_rule;
91 struct mlx5_fc *check_rule_counter;
92
93 u32 refcnt;
94 };
95
96 struct mlx5_fs_id {
97 u32 id;
98 refcount_t refcnt;
99 sci_t sci;
100 struct rhash_head hash;
101 };
102
103 struct mlx5_macsec_device {
104 struct list_head macsec_devices_list_entry;
105 void *macdev;
106 struct xarray tx_id_xa;
107 struct xarray rx_id_xa;
108 };
109
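/* TX steering state: crypto and check tables, the MKE bypass rule, the IDA
 * used to hand out per-SA fs_ids and, when RoCE MACsec is supported, the
 * RDMA TX crypto table.
 */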
110 struct mlx5_macsec_tx {
111 struct mlx5_flow_handle *crypto_mke_rule;
112 struct mlx5_flow_handle *check_rule;
113
114 struct ida tx_halloc;
115
116 struct mlx5_macsec_tables tables;
117
118 struct mlx5_flow_table *ft_rdma_tx;
119 };
120
121 struct mlx5_roce_macsec_rx_rule {
122 u32 fs_id;
123 u16 gid_idx;
124 struct mlx5_flow_handle *op;
125 struct mlx5_flow_handle *ip;
126 struct list_head entry;
127 };
128
129 struct mlx5_macsec_rx_rule {
130 struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
131 struct mlx5_modify_hdr *meta_modhdr;
132 };
133
134 struct mlx5_macsec_miss {
135 struct mlx5_flow_group *g;
136 struct mlx5_flow_handle *rule;
137 };
138
139 struct mlx5_macsec_rx_roce {
140 /* Flow table/rules in NIC domain, to check if it's a RoCE packet */
141 struct mlx5_flow_group *g;
142 struct mlx5_flow_table *ft;
143 struct mlx5_flow_handle *rule;
144 struct mlx5_modify_hdr *copy_modify_hdr;
145 struct mlx5_macsec_miss nic_miss;
146
147 /* Flow table/rule in RDMA domain, to check dgid */
148 struct mlx5_flow_table *ft_ip_check;
149 struct mlx5_flow_table *ft_macsec_op_check;
150 struct mlx5_macsec_miss miss;
151 };
152
153 struct mlx5_macsec_rx {
154 struct mlx5_flow_handle *check_rule[2];
155 struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
156
157 struct mlx5_macsec_tables tables;
158 struct mlx5_macsec_rx_roce roce;
159 };
160
161 union mlx5_macsec_rule {
162 struct mlx5_macsec_tx_rule tx_rule;
163 struct mlx5_macsec_rx_rule rx_rule;
164 };
165
166 static const struct rhashtable_params rhash_sci = {
167 .key_len = sizeof_field(struct mlx5_fs_id, sci),
168 .key_offset = offsetof(struct mlx5_fs_id, sci),
169 .head_offset = offsetof(struct mlx5_fs_id, hash),
170 .automatic_shrinking = true,
171 .min_size = 1,
172 };
173
174 static const struct rhashtable_params rhash_fs_id = {
175 .key_len = sizeof_field(struct mlx5_fs_id, id),
176 .key_offset = offsetof(struct mlx5_fs_id, id),
177 .head_offset = offsetof(struct mlx5_fs_id, hash),
178 .automatic_shrinking = true,
179 .min_size = 1,
180 };
181
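/* Top-level MACsec steering context: TX/RX pipelines, the SCI and fs_id
 * lookup tables and the per-netdev fs_id bookkeeping.
 */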
182 struct mlx5_macsec_fs {
183 struct mlx5_core_dev *mdev;
184 struct mlx5_macsec_tx *tx_fs;
185 struct mlx5_macsec_rx *rx_fs;
186
187 /* Stats management */
188 struct mlx5_macsec_stats stats;
189
190 /* Tx sci -> fs id mapping handling */
191 struct rhashtable sci_hash; /* sci -> mlx5_fs_id */
192
193 /* RX fs_id -> mlx5_fs_id mapping handling */
194 struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */
195
196 /* TX & RX fs_id lists per macsec device */
197 struct list_head macsec_devices_list;
198 };
199
200 static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft)
201 {
202 int i;
203
204 for (i = ft->num_groups - 1; i >= 0; i--) {
205 if (!IS_ERR_OR_NULL(ft->g[i]))
206 mlx5_destroy_flow_group(ft->g[i]);
207 ft->g[i] = NULL;
208 }
209 ft->num_groups = 0;
210 }
211
212 static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft)
213 {
214 macsec_fs_destroy_groups(ft);
215 kfree(ft->g);
216 mlx5_destroy_flow_table(ft->t);
217 ft->t = NULL;
218 }
219
220 static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs)
221 {
222 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
223 struct mlx5_macsec_tables *tx_tables;
224
225 if (mlx5_is_macsec_roce_supported(macsec_fs->mdev))
226 mlx5_destroy_flow_table(tx_fs->ft_rdma_tx);
227
228 tx_tables = &tx_fs->tables;
229
230 /* Tx check table */
231 if (tx_fs->check_rule) {
232 mlx5_del_flow_rules(tx_fs->check_rule);
233 tx_fs->check_rule = NULL;
234 }
235
236 if (tx_tables->check_miss_rule) {
237 mlx5_del_flow_rules(tx_tables->check_miss_rule);
238 tx_tables->check_miss_rule = NULL;
239 }
240
241 if (tx_tables->ft_check_group) {
242 mlx5_destroy_flow_group(tx_tables->ft_check_group);
243 tx_tables->ft_check_group = NULL;
244 }
245
246 if (tx_tables->ft_check) {
247 mlx5_destroy_flow_table(tx_tables->ft_check);
248 tx_tables->ft_check = NULL;
249 }
250
251 /* Tx crypto table */
252 if (tx_fs->crypto_mke_rule) {
253 mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
254 tx_fs->crypto_mke_rule = NULL;
255 }
256
257 if (tx_tables->crypto_miss_rule) {
258 mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
259 tx_tables->crypto_miss_rule = NULL;
260 }
261
262 macsec_fs_destroy_flow_table(&tx_tables->ft_crypto);
263 }
264
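/* The TX crypto table is split into three groups: a single FTE for MKE
 * (EAPOL) traffic, a large group of per-SA rules matched on metadata_reg_a,
 * and a single catch-all entry used for the default miss rule.
 */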
265 static int macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
266 {
267 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
268 int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
269 int ix = 0;
270 u32 *in;
271 int err;
272 u8 *mc;
273
274 ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
275 if (!ft->g)
276 return -ENOMEM;
277 in = kvzalloc(inlen, GFP_KERNEL);
278
279 if (!in) {
280 kfree(ft->g);
281 ft->g = NULL;
282 return -ENOMEM;
283 }
284
285 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
286
287 /* Flow Group for MKE match */
288 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
289 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
290
291 MLX5_SET_CFG(in, start_flow_index, ix);
292 ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
293 MLX5_SET_CFG(in, end_flow_index, ix - 1);
294 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
295 if (IS_ERR(ft->g[ft->num_groups]))
296 goto err;
297 ft->num_groups++;
298
299 /* Flow Group for SA rules */
300 memset(in, 0, inlen);
301 memset(mc, 0, mclen);
302 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
303 MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
304 MLX5_ETH_WQE_FT_META_MACSEC_MASK);
305
306 MLX5_SET_CFG(in, start_flow_index, ix);
307 ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
308 MLX5_SET_CFG(in, end_flow_index, ix - 1);
309 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
310 if (IS_ERR(ft->g[ft->num_groups]))
311 goto err;
312 ft->num_groups++;
313
314 /* Flow Group for l2 traps */
315 memset(in, 0, inlen);
316 memset(mc, 0, mclen);
317 MLX5_SET_CFG(in, start_flow_index, ix);
318 ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
319 MLX5_SET_CFG(in, end_flow_index, ix - 1);
320 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
321 if (IS_ERR(ft->g[ft->num_groups]))
322 goto err;
323 ft->num_groups++;
324
325 kvfree(in);
326 return 0;
327
328 err:
329 err = PTR_ERR(ft->g[ft->num_groups]);
330 ft->g[ft->num_groups] = NULL;
331 kvfree(in);
332
333 return err;
334 }
335
336 static struct mlx5_flow_table
337 *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
338 int level, int max_fte)
339 {
340 struct mlx5_flow_table_attr ft_attr = {};
341 struct mlx5_flow_table *fdb = NULL;
342
343 /* reserve entry for the match all miss group and rule */
344 ft_attr.autogroup.num_reserved_entries = 1;
345 ft_attr.autogroup.max_num_groups = 1;
346 ft_attr.prio = 0;
347 ft_attr.flags = flags;
348 ft_attr.level = level;
349 ft_attr.max_fte = max_fte;
350
351 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
352
353 return fdb;
354 }
355
356 enum {
357 RDMA_TX_MACSEC_LEVEL = 0,
358 };
359
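/* Create the TX crypto table in the RDMA TX namespace when RoCE MACsec is
 * supported; otherwise this is a no-op.
 */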
360 static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs)
361 {
362 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
363 struct mlx5_core_dev *mdev = macsec_fs->mdev;
364 struct mlx5_flow_namespace *ns;
365 struct mlx5_flow_table *ft;
366 int err;
367
368 if (!mlx5_is_macsec_roce_supported(mdev)) {
369 mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
370 return 0;
371 }
372
373 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC);
374 if (!ns)
375 return -ENOMEM;
376
377 /* Tx RoCE crypto table */
378 ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE);
379 if (IS_ERR(ft)) {
380 err = PTR_ERR(ft);
381 mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err);
382 return err;
383 }
384 tx_fs->ft_rdma_tx = ft;
385
386 return 0;
387 }
388
389 static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
390 {
391 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
392 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
393 struct mlx5_core_dev *mdev = macsec_fs->mdev;
394 struct mlx5_flow_table_attr ft_attr = {};
395 struct mlx5_flow_destination dest = {};
396 struct mlx5_macsec_tables *tx_tables;
397 struct mlx5_flow_act flow_act = {};
398 struct mlx5_macsec_flow_table *ft_crypto;
399 struct mlx5_flow_table *flow_table;
400 struct mlx5_flow_group *flow_group;
401 struct mlx5_flow_namespace *ns;
402 struct mlx5_flow_handle *rule;
403 struct mlx5_flow_spec *spec;
404 u32 *flow_group_in;
405 int err;
406
407 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
408 if (!ns)
409 return -ENOMEM;
410
411 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
412 if (!spec)
413 return -ENOMEM;
414
415 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
416 if (!flow_group_in) {
417 err = -ENOMEM;
418 goto out_spec;
419 }
420
421 tx_tables = &tx_fs->tables;
422 ft_crypto = &tx_tables->ft_crypto;
423
424 /* Tx crypto table */
425 ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
426 ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
427 ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
428
429 flow_table = mlx5_create_flow_table(ns, &ft_attr);
430 if (IS_ERR(flow_table)) {
431 err = PTR_ERR(flow_table);
432 mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
433 goto out_flow_group;
434 }
435 ft_crypto->t = flow_table;
436
437 /* Tx crypto table groups */
438 err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
439 if (err) {
440 mlx5_core_err(mdev,
441 "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
442 err);
443 goto err;
444 }
445
446 /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
447 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
448
449 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
450 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
451 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
452
453 rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
454 if (IS_ERR(rule)) {
455 err = PTR_ERR(rule);
456 mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
457 goto err;
458 }
459 tx_fs->crypto_mke_rule = rule;
460
461 /* Tx crypto table Default miss rule */
462 memset(&flow_act, 0, sizeof(flow_act));
463 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
464 rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
465 if (IS_ERR(rule)) {
466 err = PTR_ERR(rule);
467 mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
468 goto err;
469 }
470 tx_tables->crypto_miss_rule = rule;
471
472 /* Tx check table */
473 flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
474 TX_CHECK_TABLE_NUM_FTE);
475 if (IS_ERR(flow_table)) {
476 err = PTR_ERR(flow_table);
477 mlx5_core_err(mdev, "Failed to create MACsec TX check table, err(%d)\n", err);
478 goto err;
479 }
480 tx_tables->ft_check = flow_table;
481
482 /* Tx check table Default miss group/rule */
483 memset(flow_group_in, 0, inlen);
484 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
485 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
486 flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
487 if (IS_ERR(flow_group)) {
488 err = PTR_ERR(flow_group);
489 mlx5_core_err(mdev,
490 "Failed to create default flow group for MACsec Tx check table err(%d)\n",
491 err);
492 goto err;
493 }
494 tx_tables->ft_check_group = flow_group;
495
496 /* Tx check table default drop rule */
497 memset(&dest, 0, sizeof(struct mlx5_flow_destination));
498 memset(&flow_act, 0, sizeof(flow_act));
499 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
500 dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
501 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
502 rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
503 if (IS_ERR(rule)) {
504 err = PTR_ERR(rule);
505 mlx5_core_err(mdev, "Failed to add MACsec Tx check drop rule, err(%d)\n", err);
506 goto err;
507 }
508 tx_tables->check_miss_rule = rule;
509
510 /* Tx check table rule */
511 memset(spec, 0, sizeof(struct mlx5_flow_spec));
512 memset(&dest, 0, sizeof(struct mlx5_flow_destination));
513 memset(&flow_act, 0, sizeof(flow_act));
514
515 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
516 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
517 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
518
519 flow_act.flags = FLOW_ACT_NO_APPEND;
520 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
521 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
522 dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
523 rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
524 if (IS_ERR(rule)) {
525 err = PTR_ERR(rule);
526 mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err);
527 goto err;
528 }
529 tx_fs->check_rule = rule;
530
531 err = macsec_fs_tx_roce_create(macsec_fs);
532 if (err)
533 goto err;
534
535 kvfree(flow_group_in);
536 kvfree(spec);
537 return 0;
538
539 err:
540 macsec_fs_tx_destroy(macsec_fs);
541 out_flow_group:
542 kvfree(flow_group_in);
543 out_spec:
544 kvfree(spec);
545 return err;
546 }
547
548 static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs)
549 {
550 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
551 struct mlx5_macsec_tables *tx_tables;
552 int err = 0;
553
554 tx_tables = &tx_fs->tables;
555 if (tx_tables->refcnt)
556 goto out;
557
558 err = macsec_fs_tx_create(macsec_fs);
559 if (err)
560 return err;
561
562 out:
563 tx_tables->refcnt++;
564 return err;
565 }
566
567 static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs)
568 {
569 struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
570
571 if (--tx_tables->refcnt)
572 return;
573
574 macsec_fs_tx_destroy(macsec_fs);
575 }
576
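/* Allocate a per-SA fs_id from the TX IDA, match on it through
 * metadata_reg_a and attach the MACsec crypto object to the flow action.
 */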
577 static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
578 struct mlx5_flow_spec *spec,
579 struct mlx5_flow_act *flow_act,
580 u32 macsec_obj_id,
581 u32 *fs_id)
582 {
583 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
584 int err = 0;
585 u32 id;
586
587 err = ida_alloc_range(&tx_fs->tx_halloc, 1,
588 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
589 GFP_KERNEL);
590 if (err < 0)
591 return err;
592
593 id = err;
594 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
595
596 /* Metadata match */
597 MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
598 MLX5_ETH_WQE_FT_META_MACSEC_MASK);
599 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
600 macsec_fs_set_tx_fs_id(id));
601
602 *fs_id = id;
603 flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
604 flow_act->crypto.obj_id = macsec_obj_id;
605
606 mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
607 return 0;
608 }
609
610 static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
611 char *reformatbf,
612 size_t *reformat_size)
613 {
614 const struct macsec_secy *secy = ctx->secy;
615 bool sci_present = macsec_send_sci(secy);
616 struct mlx5_sectag_header sectag = {};
617 const struct macsec_tx_sc *tx_sc;
618
619 tx_sc = &secy->tx_sc;
620 sectag.ethertype = htons(ETH_P_MACSEC);
621
622 if (sci_present) {
623 sectag.tci_an |= MACSEC_TCI_SC;
624 memcpy(&sectag.sci, &secy->sci,
625 sizeof(sectag.sci));
626 } else {
627 if (tx_sc->end_station)
628 sectag.tci_an |= MACSEC_TCI_ES;
629 if (tx_sc->scb)
630 sectag.tci_an |= MACSEC_TCI_SCB;
631 }
632
633 /* With GCM, C/E clear for !encrypt, both set for encrypt */
634 if (tx_sc->encrypt)
635 sectag.tci_an |= MACSEC_TCI_CONFID;
636 else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
637 sectag.tci_an |= MACSEC_TCI_C;
638
639 sectag.tci_an |= tx_sc->encoding_sa;
640
641 *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
642
643 memcpy(reformatbf, &sectag, *reformat_size);
644 }
645
646 static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device)
647 {
648 if (xa_empty(&macsec_device->tx_id_xa) &&
649 xa_empty(&macsec_device->rx_id_xa))
650 return true;
651
652 return false;
653 }
654
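/* Release one reference on the fs_id entry of the given macsec device; the
 * last put removes it from the hash table and the xarray, and frees the
 * device entry once it holds no more TX/RX ids.
 */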
655 static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id,
656 void *macdev, struct rhashtable *hash_table, bool is_tx)
657 {
658 const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
659 struct mlx5_macsec_device *iter, *macsec_device = NULL;
660 struct mlx5_fs_id *fs_id_found;
661 struct xarray *fs_id_xa;
662
663 list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
664 if (iter->macdev == macdev) {
665 macsec_device = iter;
666 break;
667 }
668 }
669 WARN_ON(!macsec_device);
670
671 fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
672 &macsec_device->rx_id_xa;
673 xa_lock(fs_id_xa);
674 fs_id_found = xa_load(fs_id_xa, fs_id);
675 WARN_ON(!fs_id_found);
676
677 if (!refcount_dec_and_test(&fs_id_found->refcnt)) {
678 xa_unlock(fs_id_xa);
679 return;
680 }
681
682 if (fs_id_found->id) {
683 /* Make sure ongoing datapath readers see a valid SA */
684 rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash);
685 fs_id_found->id = 0;
686 }
687 xa_unlock(fs_id_xa);
688
689 xa_erase(fs_id_xa, fs_id);
690
691 kfree(fs_id_found);
692
693 if (macsec_fs_is_macsec_device_empty(macsec_device)) {
694 list_del(&macsec_device->macsec_devices_list_entry);
695 kfree(macsec_device);
696 }
697 }
698
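/* Register a new fs_id for the given macsec device: RX entries are keyed by
 * fs_id and may be shared (refcounted), TX entries are keyed by SCI.
 */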
699 static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id,
700 void *macdev, struct rhashtable *hash_table, sci_t sci,
701 bool is_tx)
702 {
703 const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
704 struct mlx5_macsec_device *iter, *macsec_device = NULL;
705 struct mlx5_fs_id *fs_id_iter;
706 struct xarray *fs_id_xa;
707 int err;
708
709 if (!is_tx) {
710 rcu_read_lock();
711 fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id);
712 if (fs_id_iter) {
713 refcount_inc(&fs_id_iter->refcnt);
714 rcu_read_unlock();
715 return 0;
716 }
717 rcu_read_unlock();
718 }
719
720 fs_id_iter = kzalloc(sizeof(*fs_id_iter), GFP_KERNEL);
721 if (!fs_id_iter)
722 return -ENOMEM;
723
724 list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
725 if (iter->macdev == macdev) {
726 macsec_device = iter;
727 break;
728 }
729 }
730
731 if (!macsec_device) { /* first time adding a SA to that device */
732 macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
733 if (!macsec_device) {
734 err = -ENOMEM;
735 goto err_alloc_dev;
736 }
737 macsec_device->macdev = macdev;
738 xa_init(&macsec_device->tx_id_xa);
739 xa_init(&macsec_device->rx_id_xa);
740 list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
741 }
742
743 fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
744 &macsec_device->rx_id_xa;
745 fs_id_iter->id = fs_id;
746 refcount_set(&fs_id_iter->refcnt, 1);
747 fs_id_iter->sci = sci;
748 err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL));
749 if (err)
750 goto err_store_id;
751
752 err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash);
753 if (err)
754 goto err_hash_insert;
755
756 return 0;
757
758 err_hash_insert:
759 xa_erase(fs_id_xa, fs_id);
760 err_store_id:
761 if (macsec_fs_is_macsec_device_empty(macsec_device)) {
762 list_del(&macsec_device->macsec_devices_list_entry);
763 kfree(macsec_device);
764 }
765 err_alloc_dev:
766 kfree(fs_id_iter);
767 return err;
768 }
769
770 static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs,
771 struct mlx5_macsec_tx_rule *tx_rule,
772 void *macdev)
773 {
774 macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev,
775 &macsec_fs->sci_hash, true);
776
777 if (tx_rule->rule) {
778 mlx5_del_flow_rules(tx_rule->rule);
779 tx_rule->rule = NULL;
780 }
781
782 if (tx_rule->pkt_reformat) {
783 mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
784 tx_rule->pkt_reformat = NULL;
785 }
786
787 if (tx_rule->fs_id) {
788 ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
789 tx_rule->fs_id = 0;
790 }
791
792 kfree(tx_rule);
793
794 macsec_fs_tx_ft_put(macsec_fs);
795 }
796
797 #define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
798
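/* Install the TX rule for one SA: build the SecTAG ADD_MACSEC packet
 * reformat (with a 4-byte offset when the netdev is a VLAN device), set up
 * the fs_id metadata match and encrypt action, and forward hits to the TX
 * check table.
 */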
799 static union mlx5_macsec_rule *
800 macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs,
801 const struct macsec_context *macsec_ctx,
802 struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id)
803 {
804 char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
805 struct mlx5_pkt_reformat_params reformat_params = {};
806 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
807 struct mlx5_core_dev *mdev = macsec_fs->mdev;
808 union mlx5_macsec_rule *macsec_rule = NULL;
809 struct mlx5_flow_destination dest = {};
810 struct mlx5_macsec_tables *tx_tables;
811 struct mlx5_macsec_tx_rule *tx_rule;
812 struct mlx5_flow_act flow_act = {};
813 struct mlx5_flow_handle *rule;
814 struct mlx5_flow_spec *spec;
815 size_t reformat_size;
816 int err = 0;
817
818 tx_tables = &tx_fs->tables;
819
820 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
821 if (!spec)
822 return NULL;
823
824 err = macsec_fs_tx_ft_get(macsec_fs);
825 if (err)
826 goto out_spec;
827
828 macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
829 if (!macsec_rule) {
830 macsec_fs_tx_ft_put(macsec_fs);
831 goto out_spec;
832 }
833
834 tx_rule = &macsec_rule->tx_rule;
835
836 /* Tx crypto table crypto rule */
837 macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
838
839 reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
840 reformat_params.size = reformat_size;
841 reformat_params.data = reformatbf;
842
843 if (is_vlan_dev(macsec_ctx->netdev))
844 reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
845
846 flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev,
847 &reformat_params,
848 MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
849 if (IS_ERR(flow_act.pkt_reformat)) {
850 err = PTR_ERR(flow_act.pkt_reformat);
851 mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
852 goto err;
853 }
854 tx_rule->pkt_reformat = flow_act.pkt_reformat;
855
856 err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id);
857 if (err) {
858 mlx5_core_err(mdev,
859 "Failed to setup FTE for MACsec TX crypto rule, err=%d\n",
860 err);
861 goto err;
862 }
863
864 tx_rule->fs_id = *fs_id;
865
866 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
867 MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
868 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
869 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
870 dest.ft = tx_tables->ft_check;
871 rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
872 if (IS_ERR(rule)) {
873 err = PTR_ERR(rule);
874 mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
875 goto err;
876 }
877 tx_rule->rule = rule;
878
879 err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev,
880 &macsec_fs->sci_hash, attrs->sci, true);
881 if (err) {
882 mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
883 goto err;
884 }
885
886 goto out_spec;
887
888 err:
889 macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev);
890 macsec_rule = NULL;
891 out_spec:
892 kvfree(spec);
893
894 return macsec_rule;
895 }
896
897 static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs)
898 {
899 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
900 struct mlx5_core_dev *mdev = macsec_fs->mdev;
901 struct mlx5_macsec_tables *tx_tables;
902
903 if (!tx_fs)
904 return;
905
906 tx_tables = &tx_fs->tables;
907 if (tx_tables->refcnt) {
908 mlx5_core_err(mdev,
909 "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
910 tx_tables->refcnt);
911 return;
912 }
913
914 ida_destroy(&tx_fs->tx_halloc);
915
916 if (tx_tables->check_miss_rule_counter) {
917 mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
918 tx_tables->check_miss_rule_counter = NULL;
919 }
920
921 if (tx_tables->check_rule_counter) {
922 mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
923 tx_tables->check_rule_counter = NULL;
924 }
925
926 kfree(tx_fs);
927 macsec_fs->tx_fs = NULL;
928 }
929
930 static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs)
931 {
932 struct mlx5_core_dev *mdev = macsec_fs->mdev;
933 struct mlx5_macsec_tables *tx_tables;
934 struct mlx5_macsec_tx *tx_fs;
935 struct mlx5_fc *flow_counter;
936 int err;
937
938 tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
939 if (!tx_fs)
940 return -ENOMEM;
941
942 tx_tables = &tx_fs->tables;
943
944 flow_counter = mlx5_fc_create(mdev, false);
945 if (IS_ERR(flow_counter)) {
946 err = PTR_ERR(flow_counter);
947 mlx5_core_err(mdev,
948 "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
949 err);
950 goto err_encrypt_counter;
951 }
952 tx_tables->check_rule_counter = flow_counter;
953
954 flow_counter = mlx5_fc_create(mdev, false);
955 if (IS_ERR(flow_counter)) {
956 err = PTR_ERR(flow_counter);
957 mlx5_core_err(mdev,
958 "Failed to create MACsec Tx drop flow counter, err(%d)\n",
959 err);
960 goto err_drop_counter;
961 }
962 tx_tables->check_miss_rule_counter = flow_counter;
963
964 ida_init(&tx_fs->tx_halloc);
965 INIT_LIST_HEAD(&macsec_fs->macsec_devices_list);
966
967 macsec_fs->tx_fs = tx_fs;
968
969 return 0;
970
971 err_drop_counter:
972 mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
973 tx_tables->check_rule_counter = NULL;
974
975 err_encrypt_counter:
976 kfree(tx_fs);
977 macsec_fs->tx_fs = NULL;
978
979 return err;
980 }
981
982 static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss)
983 {
984 mlx5_del_flow_rules(miss->rule);
985 mlx5_destroy_flow_group(miss->g);
986 }
987
988 static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev)
989 {
990 if (!mlx5_is_macsec_roce_supported(mdev))
991 return;
992
993 mlx5_del_flow_rules(roce->nic_miss.rule);
994 mlx5_del_flow_rules(roce->rule);
995 mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr);
996 mlx5_destroy_flow_group(roce->nic_miss.g);
997 mlx5_destroy_flow_group(roce->g);
998 mlx5_destroy_flow_table(roce->ft);
999
1000 macsec_fs_rx_roce_miss_destroy(&roce->miss);
1001 mlx5_destroy_flow_table(roce->ft_macsec_op_check);
1002 mlx5_destroy_flow_table(roce->ft_ip_check);
1003 }
1004
1005 static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs)
1006 {
1007 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1008 struct mlx5_macsec_tables *rx_tables;
1009 int i;
1010
1011 /* Rx check table */
1012 for (i = 1; i >= 0; --i) {
1013 if (rx_fs->check_rule[i]) {
1014 mlx5_del_flow_rules(rx_fs->check_rule[i]);
1015 rx_fs->check_rule[i] = NULL;
1016 }
1017
1018 if (rx_fs->check_rule_pkt_reformat[i]) {
1019 mlx5_packet_reformat_dealloc(macsec_fs->mdev,
1020 rx_fs->check_rule_pkt_reformat[i]);
1021 rx_fs->check_rule_pkt_reformat[i] = NULL;
1022 }
1023 }
1024
1025 rx_tables = &rx_fs->tables;
1026
1027 if (rx_tables->check_miss_rule) {
1028 mlx5_del_flow_rules(rx_tables->check_miss_rule);
1029 rx_tables->check_miss_rule = NULL;
1030 }
1031
1032 if (rx_tables->ft_check_group) {
1033 mlx5_destroy_flow_group(rx_tables->ft_check_group);
1034 rx_tables->ft_check_group = NULL;
1035 }
1036
1037 if (rx_tables->ft_check) {
1038 mlx5_destroy_flow_table(rx_tables->ft_check);
1039 rx_tables->ft_check = NULL;
1040 }
1041
1042 /* Rx crypto table */
1043 if (rx_tables->crypto_miss_rule) {
1044 mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
1045 rx_tables->crypto_miss_rule = NULL;
1046 }
1047
1048 macsec_fs_destroy_flow_table(&rx_tables->ft_crypto);
1049
1050 macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev);
1051 }
1052
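/* The RX crypto table has three groups: SA rules that match the full SCI
 * (SecTAG words 2/3), SA rules without SCI that match the source MAC
 * instead, and a single catch-all entry used for the default miss rule.
 */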
1053 static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
1054 {
1055 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1056 int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
1057 int ix = 0;
1058 u32 *in;
1059 int err;
1060 u8 *mc;
1061
1062 ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1063 if (!ft->g)
1064 return -ENOMEM;
1065
1066 in = kvzalloc(inlen, GFP_KERNEL);
1067 if (!in) {
1068 kfree(ft->g);
1069 return -ENOMEM;
1070 }
1071
1072 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1073
1074 /* Flow group for SA rule with SCI */
1075 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
1076 MLX5_MATCH_MISC_PARAMETERS_5);
1077 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1078
1079 MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
1080 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
1081 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1082 MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
1083 MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
1084
1085 MLX5_SET_CFG(in, start_flow_index, ix);
1086 ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
1087 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1088 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1089 if (IS_ERR(ft->g[ft->num_groups]))
1090 goto err;
1091 ft->num_groups++;
1092
1093 /* Flow group for SA rule without SCI */
1094 memset(in, 0, inlen);
1095 memset(mc, 0, mclen);
1096 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
1097 MLX5_MATCH_MISC_PARAMETERS_5);
1098 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
1099 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
1100 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1101
1102 MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
1103 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1104
1105 MLX5_SET_CFG(in, start_flow_index, ix);
1106 ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
1107 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1108 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1109 if (IS_ERR(ft->g[ft->num_groups]))
1110 goto err;
1111 ft->num_groups++;
1112
1113 /* Flow Group for l2 traps */
1114 memset(in, 0, inlen);
1115 memset(mc, 0, mclen);
1116 MLX5_SET_CFG(in, start_flow_index, ix);
1117 ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
1118 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1119 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1120 if (IS_ERR(ft->g[ft->num_groups]))
1121 goto err;
1122 ft->num_groups++;
1123
1124 kvfree(in);
1125 return 0;
1126
1127 err:
1128 err = PTR_ERR(ft->g[ft->num_groups]);
1129 ft->g[ft->num_groups] = NULL;
1130 kvfree(in);
1131
1132 return err;
1133 }
1134
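/* Add one of the two RX check-table rules (SecTAG with or without SCI): on a
 * clean MACsec/ASO syndrome the SecTAG is removed via a DEL_MACSEC reformat
 * and the packet is forwarded to the RoCE table or to the next prio.
 */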
1135 static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs,
1136 struct mlx5_flow_destination *dest,
1137 struct mlx5_flow_act *flow_act,
1138 struct mlx5_flow_spec *spec,
1139 int reformat_param_size)
1140 {
1141 int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
1142 u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
1143 struct mlx5_pkt_reformat_params reformat_params = {};
1144 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1145 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1146 struct mlx5_flow_destination roce_dest[2];
1147 struct mlx5_macsec_tables *rx_tables;
1148 struct mlx5_flow_handle *rule;
1149 int err = 0, dstn = 0;
1150
1151 rx_tables = &rx_fs->tables;
1152
1153 /* Rx check table decap 16B rule */
1154 memset(dest, 0, sizeof(*dest));
1155 memset(flow_act, 0, sizeof(*flow_act));
1156 memset(spec, 0, sizeof(*spec));
1157
1158 reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
1159 reformat_params.size = reformat_param_size;
1160 reformat_params.data = mlx5_reformat_buf;
1161 flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev,
1162 &reformat_params,
1163 MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
1164 if (IS_ERR(flow_act->pkt_reformat)) {
1165 err = PTR_ERR(flow_act->pkt_reformat);
1166 mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
1167 return err;
1168 }
1169 rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
1170
1171 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1172 /* MACsec syndrome match */
1173 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
1174 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
1175 /* ASO return reg syndrome match */
1176 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
1177 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
1178
1179 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
1180 /* Sectag TCI SC present bit */
1181 MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
1182 MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1183
1184 if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
1185 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
1186 MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
1187 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1188
1189 flow_act->flags = FLOW_ACT_NO_APPEND;
1190
1191 if (rx_fs->roce.ft) {
1192 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1193 roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1194 roce_dest[dstn].ft = rx_fs->roce.ft;
1195 dstn++;
1196 } else {
1197 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1198 }
1199
1200 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1201 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1202 roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1203 roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
1204 rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
1205
1206 if (IS_ERR(rule)) {
1207 err = PTR_ERR(rule);
1208 mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
1209 return err;
1210 }
1211
1212 rx_fs->check_rule[rule_index] = rule;
1213
1214 return 0;
1215 }
1216
1217 static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev,
1218 struct mlx5_macsec_rx_roce *roce)
1219 {
1220 struct mlx5_flow_act flow_act = {};
1221 struct mlx5_flow_group *flow_group;
1222 struct mlx5_flow_handle *rule;
1223 u32 *flow_group_in;
1224 int err;
1225
1226 flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
1227 if (!flow_group_in)
1228 return -ENOMEM;
1229
1230 /* The IP check ft has no miss rule since we use the default miss action, which is to go to the next prio */
1231 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
1232 roce->ft_macsec_op_check->max_fte - 1);
1233 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1234 roce->ft_macsec_op_check->max_fte - 1);
1235 flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in);
1236 if (IS_ERR(flow_group)) {
1237 err = PTR_ERR(flow_group);
1238 mlx5_core_err(mdev,
1239 "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n",
1240 err);
1241 goto err_macsec_op_miss_group;
1242 }
1243 roce->miss.g = flow_group;
1244
1245 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1246 rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0);
1247 if (IS_ERR(rule)) {
1248 err = PTR_ERR(rule);
1249 mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n",
1250 err);
1251 goto err_macsec_op_rule;
1252 }
1253 roce->miss.rule = rule;
1254
1255 kvfree(flow_group_in);
1256 return 0;
1257
1258 err_macsec_op_rule:
1259 mlx5_destroy_flow_group(roce->miss.g);
1260 err_macsec_op_miss_group:
1261 kvfree(flow_group_in);
1262 return err;
1263 }
1264
1265 #define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
1266
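/* NIC-domain UDP table groups: one group matching ip_protocol/udp_dport to
 * catch RoCE v2 traffic and one miss group.
 */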
1267 static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev,
1268 struct mlx5_macsec_rx_roce *roce)
1269 {
1270 struct mlx5_flow_group *g;
1271 void *outer_headers_c;
1272 int ix = 0;
1273 u32 *in;
1274 int err;
1275 u8 *mc;
1276
1277 in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
1278 if (!in)
1279 return -ENOMEM;
1280
1281 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1282 outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
1283 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
1284 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
1285
1286 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1287 MLX5_SET_CFG(in, start_flow_index, ix);
1288 ix += MLX5_RX_ROCE_GROUP_SIZE;
1289 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1290 g = mlx5_create_flow_group(roce->ft, in);
1291 if (IS_ERR(g)) {
1292 err = PTR_ERR(g);
1293 mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n",
1294 err);
1295 goto err_udp_group;
1296 }
1297 roce->g = g;
1298
1299 memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
1300 MLX5_SET_CFG(in, start_flow_index, ix);
1301 ix += MLX5_RX_ROCE_GROUP_SIZE;
1302 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1303 g = mlx5_create_flow_group(roce->ft, in);
1304 if (IS_ERR(g)) {
1305 err = PTR_ERR(g);
1306 mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n",
1307 err);
1308 goto err_udp_miss_group;
1309 }
1310 roce->nic_miss.g = g;
1311
1312 kvfree(in);
1313 return 0;
1314
1315 err_udp_miss_group:
1316 mlx5_destroy_flow_group(roce->g);
1317 err_udp_group:
1318 kvfree(in);
1319 return err;
1320 }
1321
1322 static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs,
1323 struct mlx5_macsec_rx_roce *roce)
1324 {
1325 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1326 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1327 struct mlx5_flow_destination dst = {};
1328 struct mlx5_modify_hdr *modify_hdr;
1329 MLX5_DECLARE_FLOW_ACT(flow_act);
1330 struct mlx5_flow_handle *rule;
1331 struct mlx5_flow_spec *spec;
1332 int err;
1333
1334 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1335 if (!spec)
1336 return -ENOMEM;
1337
1338 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1339 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
1340 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
1341 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
1342 MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT);
1343
1344 MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
1345 MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1346 MLX5_SET(copy_action_in, action, src_offset, 0);
1347 MLX5_SET(copy_action_in, action, length, 32);
1348 MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5);
1349 MLX5_SET(copy_action_in, action, dst_offset, 0);
1350
1351 modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
1352 1, action);
1353
1354 if (IS_ERR(modify_hdr)) {
1355 err = PTR_ERR(modify_hdr);
1356 mlx5_core_err(mdev,
1357 "Failed to alloc macsec copy modify_header_id err(%d)\n", err);
1358 goto err_alloc_hdr;
1359 }
1360
1361 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1362 flow_act.modify_hdr = modify_hdr;
1363 dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
1364 dst.ft = roce->ft_ip_check;
1365 rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
1366 if (IS_ERR(rule)) {
1367 err = PTR_ERR(rule);
1368 mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n",
1369 err);
1370 goto err_add_rule;
1371 }
1372 roce->rule = rule;
1373 roce->copy_modify_hdr = modify_hdr;
1374
1375 memset(&flow_act, 0, sizeof(flow_act));
1376 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1377 rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0);
1378 if (IS_ERR(rule)) {
1379 err = PTR_ERR(rule);
1380 mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n",
1381 err);
1382 goto err_add_rule2;
1383 }
1384 roce->nic_miss.rule = rule;
1385
1386 kvfree(spec);
1387 return 0;
1388
1389 err_add_rule2:
1390 mlx5_del_flow_rules(roce->rule);
1391 err_add_rule:
1392 mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr);
1393 err_alloc_hdr:
1394 kvfree(spec);
1395 return err;
1396 }
1397
1398 static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs,
1399 struct mlx5_macsec_rx_roce *roce)
1400 {
1401 int err;
1402
1403 err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce);
1404 if (err)
1405 return err;
1406
1407 err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce);
1408 if (err)
1409 goto err;
1410
1411 return 0;
1412 err:
1413 mlx5_destroy_flow_group(roce->nic_miss.g);
1414 mlx5_destroy_flow_group(roce->g);
1415 return err;
1416 }
1417
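/* RX RoCE pipeline: two tables in the RDMA RX namespace (IP check and
 * MACsec-operation check) plus a NIC-domain UDP table whose rule copies
 * metadata_reg_b into reg_c_5 and jumps RoCE v2 packets to the RDMA tables.
 */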
1418 static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs)
1419 {
1420 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1421 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1422 struct mlx5_flow_table_attr ft_attr = {};
1423 struct mlx5_flow_namespace *ns;
1424 struct mlx5_flow_table *ft;
1425 int err = 0;
1426
1427 if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) {
1428 mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
1429 return 0;
1430 }
1431
1432 ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC);
1433 if (!ns)
1434 return -ENOMEM;
1435
1436 ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL,
1437 CRYPTO_NUM_MAXSEC_FTE);
1438 if (IS_ERR(ft)) {
1439 err = PTR_ERR(ft);
1440 mlx5_core_err(mdev,
1441 "Failed to create MACsec IP check RoCE table err(%d)\n", err);
1442 return err;
1443 }
1444 rx_fs->roce.ft_ip_check = ft;
1445
1446 ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL,
1447 CRYPTO_NUM_MAXSEC_FTE);
1448 if (IS_ERR(ft)) {
1449 err = PTR_ERR(ft);
1450 mlx5_core_err(mdev,
1451 "Failed to create MACsec operation check RoCE table err(%d)\n",
1452 err);
1453 goto err_macsec_op;
1454 }
1455 rx_fs->roce.ft_macsec_op_check = ft;
1456
1457 err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce);
1458 if (err)
1459 goto err_miss_create;
1460
1461 ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
1462 if (!ns) {
1463 err = -EOPNOTSUPP;
1464 goto err_ns;
1465 }
1466
1467 ft_attr.level = RX_ROCE_TABLE_LEVEL;
1468 ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE;
1469 ft = mlx5_create_flow_table(ns, &ft_attr);
1470 if (IS_ERR(ft)) {
1471 err = PTR_ERR(ft);
1472 mlx5_core_err(mdev,
1473 "Failed to create MACsec jump to RX RoCE, NIC table err(%d)\n", err);
1474 goto err_ns;
1475 }
1476 rx_fs->roce.ft = ft;
1477
1478 err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce);
1479 if (err)
1480 goto err_udp_ft;
1481
1482 return 0;
1483
1484 err_udp_ft:
1485 mlx5_destroy_flow_table(rx_fs->roce.ft);
1486 err_ns:
1487 macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss);
1488 err_miss_create:
1489 mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check);
1490 err_macsec_op:
1491 mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check);
1492 return err;
1493 }
1494
1495 static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
1496 {
1497 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1498 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1499 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1500 struct mlx5_macsec_flow_table *ft_crypto;
1501 struct mlx5_flow_table_attr ft_attr = {};
1502 struct mlx5_flow_destination dest = {};
1503 struct mlx5_macsec_tables *rx_tables;
1504 struct mlx5_flow_table *flow_table;
1505 struct mlx5_flow_group *flow_group;
1506 struct mlx5_flow_act flow_act = {};
1507 struct mlx5_flow_namespace *ns;
1508 struct mlx5_flow_handle *rule;
1509 struct mlx5_flow_spec *spec;
1510 u32 *flow_group_in;
1511 int err;
1512
1513 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
1514 if (!ns)
1515 return -ENOMEM;
1516
1517 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1518 if (!spec)
1519 return -ENOMEM;
1520
1521 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1522 if (!flow_group_in) {
1523 err = -ENOMEM;
1524 goto free_spec;
1525 }
1526
1527 rx_tables = &rx_fs->tables;
1528 ft_crypto = &rx_tables->ft_crypto;
1529
1530 err = macsec_fs_rx_roce_create(macsec_fs);
1531 if (err)
1532 goto out_flow_group;
1533
1534 /* Rx crypto table */
1535 ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
1536 ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
1537
1538 flow_table = mlx5_create_flow_table(ns, &ft_attr);
1539 if (IS_ERR(flow_table)) {
1540 err = PTR_ERR(flow_table);
1541 mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
1542 goto err;
1543 }
1544 ft_crypto->t = flow_table;
1545
1546 /* Rx crypto table groups */
1547 err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
1548 if (err) {
1549 mlx5_core_err(mdev,
1550 "Failed to create default flow group for MACsec Rx crypto table err(%d)\n",
1551 err);
1552 goto err;
1553 }
1554
1555 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1556 rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
1557 if (IS_ERR(rule)) {
1558 err = PTR_ERR(rule);
1559 mlx5_core_err(mdev,
1560 "Failed to add MACsec Rx crypto table default miss rule %d\n",
1561 err);
1562 goto err;
1563 }
1564 rx_tables->crypto_miss_rule = rule;
1565
1566 /* Rx check table */
1567 flow_table = macsec_fs_auto_group_table_create(ns,
1568 MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
1569 RX_CHECK_TABLE_LEVEL,
1570 RX_CHECK_TABLE_NUM_FTE);
1571 if (IS_ERR(flow_table)) {
1572 err = PTR_ERR(flow_table);
1573 mlx5_core_err(mdev, "Failed to create MACsec RX check table, err(%d)\n", err);
1574 goto err;
1575 }
1576 rx_tables->ft_check = flow_table;
1577
1578 /* Rx check table Default miss group/rule */
1579 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
1580 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
1581 flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
1582 if (IS_ERR(flow_group)) {
1583 err = PTR_ERR(flow_group);
1584 mlx5_core_err(mdev,
1585 "Failed to create default flow group for MACsec Rx check table err(%d)\n",
1586 err);
1587 goto err;
1588 }
1589 rx_tables->ft_check_group = flow_group;
1590
1591 /* Rx check table default drop rule */
1592 memset(&flow_act, 0, sizeof(flow_act));
1593
1594 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1595 dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
1596 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1597 rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
1598 if (IS_ERR(rule)) {
1599 err = PTR_ERR(rule);
1600 mlx5_core_err(mdev, "Failed to add MACsec Rx check drop rule, err(%d)\n", err);
1601 goto err;
1602 }
1603 rx_tables->check_miss_rule = rule;
1604
1605 /* Rx check table decap rules */
1606 err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
1607 MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
1608 if (err)
1609 goto err;
1610
1611 err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
1612 MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
1613 if (err)
1614 goto err;
1615
1616 goto out_flow_group;
1617
1618 err:
1619 macsec_fs_rx_destroy(macsec_fs);
1620 out_flow_group:
1621 kvfree(flow_group_in);
1622 free_spec:
1623 kvfree(spec);
1624 return err;
1625 }
1626
1627 static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs)
1628 {
1629 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
1630 int err = 0;
1631
1632 if (rx_tables->refcnt)
1633 goto out;
1634
1635 err = macsec_fs_rx_create(macsec_fs);
1636 if (err)
1637 return err;
1638
1639 out:
1640 rx_tables->refcnt++;
1641 return err;
1642 }
1643
1644 static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs)
1645 {
1646 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
1647
1648 if (--rx_tables->refcnt)
1649 return;
1650
1651 macsec_fs_rx_destroy(macsec_fs);
1652 }
1653
1654 static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs,
1655 struct mlx5_macsec_rx_rule *rx_rule,
1656 void *macdev, u32 fs_id)
1657 {
1658 int i;
1659
1660 macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev,
1661 &macsec_fs->fs_id_hash, false);
1662
1663 for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
1664 if (rx_rule->rule[i]) {
1665 mlx5_del_flow_rules(rx_rule->rule[i]);
1666 rx_rule->rule[i] = NULL;
1667 }
1668 }
1669
1670 if (rx_rule->meta_modhdr) {
1671 mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
1672 rx_rule->meta_modhdr = NULL;
1673 }
1674
1675 kfree(rx_rule);
1676
1677 macsec_fs_rx_ft_put(macsec_fs);
1678 }
1679
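/* Build the match for one RX SA: MACsec ethertype plus the TCI/AN bits in
 * macsec_tag_0; with an SCI also match SecTAG words 2/3, otherwise match the
 * source MAC carried in the SCI.
 */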
1680 static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
1681 struct mlx5_flow_act *flow_act,
1682 struct mlx5_macsec_rule_attrs *attrs,
1683 bool sci_present)
1684 {
1685 u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
1686 struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
1687 __be32 *sci_p = (__be32 *)(&attrs->sci);
1688
1689 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1690
1691 /* MACsec ethertype */
1692 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
1693 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
1694
1695 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
1696
1697 /* Sectag AN + TCI SC present bit */
1698 MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
1699 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1700 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
1701 tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1702
1703 if (sci_present) {
1704 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1705 misc_parameters_5.macsec_tag_2);
1706 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
1707 be32_to_cpu(sci_p[0]));
1708
1709 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1710 misc_parameters_5.macsec_tag_3);
1711 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
1712 be32_to_cpu(sci_p[1]));
1713 } else {
1714 /* When the SCI isn't present in the SecTAG, match the source MAC
1715  * address only if the SCI contains the default MACsec PORT. */
1716 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1717 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1718 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
1719 sci_p, ETH_ALEN);
1720 }
1721
1722 crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
1723 crypto_params->obj_id = attrs->macsec_obj_id;
1724 }
1725
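/* Add the Rx steering for a new SA: a modify header writes the fs_id
 * (tagged with the Rx marker bit) into metadata REG_B, then one crypto
 * table rule matches the explicit SCI and, when the SCI uses the default
 * port (MACSEC_PORT_ES), a second rule matches the source MAC instead.
 * Both rules decrypt and forward to the Rx check table.
 */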
1726 static union mlx5_macsec_rule *
1727 macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs,
1728 const struct macsec_context *macsec_ctx,
1729 struct mlx5_macsec_rule_attrs *attrs,
1730 u32 fs_id)
1731 {
1732 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1733 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1734 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1735 union mlx5_macsec_rule *macsec_rule = NULL;
1736 struct mlx5_modify_hdr *modify_hdr = NULL;
1737 struct mlx5_macsec_flow_table *ft_crypto;
1738 struct mlx5_flow_destination dest = {};
1739 struct mlx5_macsec_tables *rx_tables;
1740 struct mlx5_macsec_rx_rule *rx_rule;
1741 struct mlx5_flow_act flow_act = {};
1742 struct mlx5_flow_handle *rule;
1743 struct mlx5_flow_spec *spec;
1744 int err = 0;
1745
1746 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1747 if (!spec)
1748 return NULL;
1749
1750 err = macsec_fs_rx_ft_get(macsec_fs);
1751 if (err)
1752 goto out_spec;
1753
1754 macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
1755 if (!macsec_rule) {
1756 macsec_fs_rx_ft_put(macsec_fs);
1757 goto out_spec;
1758 }
1759
1760 rx_rule = &macsec_rule->rx_rule;
1761 rx_tables = &rx_fs->tables;
1762 ft_crypto = &rx_tables->ft_crypto;
1763
1764 /* Set bits [31:30] to the MACsec marker (0x01) and
1765  * bits [15:0] to the fs_id. */
1766 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1767 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1768 MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id));
1769 MLX5_SET(set_action_in, action, offset, 0);
1770 MLX5_SET(set_action_in, action, length, 32);
1771
1772 modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
1773 1, action);
1774 if (IS_ERR(modify_hdr)) {
1775 err = PTR_ERR(modify_hdr);
1776 mlx5_core_err(mdev, "Failed to alloc MACsec set modify_header_id err=%d\n", err);
1777 modify_hdr = NULL;
1778 goto err;
1779 }
1780 rx_rule->meta_modhdr = modify_hdr;
1781
1782 /* Rx crypto table with SCI rule */
1783 macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
1784
1785 flow_act.modify_hdr = modify_hdr;
1786 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1787 MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
1788 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1789
1790 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1791 dest.ft = rx_tables->ft_check;
1792 rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
1793 if (IS_ERR(rule)) {
1794 err = PTR_ERR(rule);
1795 mlx5_core_err(mdev,
1796 "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
1797 err);
1798 goto err;
1799 }
1800 rx_rule->rule[0] = rule;
1801
1802 /* Rx crypto table without SCI rule */
1803 if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
1804 memset(spec, 0, sizeof(struct mlx5_flow_spec));
1805 memset(&dest, 0, sizeof(struct mlx5_flow_destination));
1806 memset(&flow_act, 0, sizeof(flow_act));
1807
1808 macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
1809
1810 flow_act.modify_hdr = modify_hdr;
1811 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1812 MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
1813 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1814
1815 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1816 dest.ft = rx_tables->ft_check;
1817 rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
1818 if (IS_ERR(rule)) {
1819 err = PTR_ERR(rule);
1820 mlx5_core_err(mdev,
1821 "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
1822 err);
1823 goto err;
1824 }
1825 rx_rule->rule[1] = rule;
1826 }
1827
1828 err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev,
1829 &macsec_fs->fs_id_hash, attrs->sci, false);
1830 if (err) {
1831 mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
1832 goto err;
1833 }
1834
1835 kvfree(spec);
1836 return macsec_rule;
1837
1838 err:
1839 macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id);
1840 macsec_rule = NULL;
1841 out_spec:
1842 kvfree(spec);
1843 return macsec_rule;
1844 }
1845
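/* Allocate the Rx flow steering context and its two flow counters: one
 * counting packets that pass the Rx check table and one counting drops.
 */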
1846 static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs)
1847 {
1848 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1849 struct mlx5_macsec_tables *rx_tables;
1850 struct mlx5_macsec_rx *rx_fs;
1851 struct mlx5_fc *flow_counter;
1852 int err;
1853
1854 rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
1855 if (!rx_fs)
1856 return -ENOMEM;
1857
1858 flow_counter = mlx5_fc_create(mdev, false);
1859 if (IS_ERR(flow_counter)) {
1860 err = PTR_ERR(flow_counter);
1861 mlx5_core_err(mdev,
1862 "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
1863 err);
1864 goto err_encrypt_counter;
1865 }
1866
1867 rx_tables = &rx_fs->tables;
1868 rx_tables->check_rule_counter = flow_counter;
1869
1870 flow_counter = mlx5_fc_create(mdev, false);
1871 if (IS_ERR(flow_counter)) {
1872 err = PTR_ERR(flow_counter);
1873 mlx5_core_err(mdev,
1874 "Failed to create MACsec Rx drop flow counter, err(%d)\n",
1875 err);
1876 goto err_drop_counter;
1877 }
1878 rx_tables->check_miss_rule_counter = flow_counter;
1879
1880 macsec_fs->rx_fs = rx_fs;
1881
1882 return 0;
1883
1884 err_drop_counter:
1885 mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
1886 rx_tables->check_rule_counter = NULL;
1887
1888 err_encrypt_counter:
1889 kfree(rx_fs);
1890 macsec_fs->rx_fs = NULL;
1891
1892 return err;
1893 }
1894
1895 static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs)
1896 {
1897 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1898 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1899 struct mlx5_macsec_tables *rx_tables;
1900
1901 if (!rx_fs)
1902 return;
1903
1904 rx_tables = &rx_fs->tables;
1905
1906 if (rx_tables->refcnt) {
1907 mlx5_core_err(mdev,
1908 "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
1909 rx_tables->refcnt);
1910 return;
1911 }
1912
1913 if (rx_tables->check_miss_rule_counter) {
1914 mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
1915 rx_tables->check_miss_rule_counter = NULL;
1916 }
1917
1918 if (rx_tables->check_rule_counter) {
1919 mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
1920 rx_tables->check_rule_counter = NULL;
1921 }
1922
1923 kfree(rx_fs);
1924 macsec_fs->rx_fs = NULL;
1925 }
1926
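/* Helpers filling the outer-header IP match (IPv4/IPv6, source or
 * destination address) used by the RoCE MACsec steering rules below.
 */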
1927 static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip)
1928 {
1929 MLX5_SET(fte_match_param, spec->match_value,
1930 outer_headers.ip_version, MLX5_FS_IPV4_VERSION);
1931
1932 if (is_dst_ip) {
1933 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1934 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1935 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1936 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1937 &in->sin_addr.s_addr, 4);
1938 } else {
1939 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1940 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
1941 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1942 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
1943 &in->sin_addr.s_addr, 4);
1944 }
1945 }
1946
1947 static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec,
1948 bool is_dst_ip)
1949 {
1950 MLX5_SET(fte_match_param, spec->match_value,
1951 outer_headers.ip_version, MLX5_FS_IPV6_VERSION);
1952
1953 if (is_dst_ip) {
1954 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1955 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1956 0xff, 16);
1957 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1958 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1959 &in6->sin6_addr, 16);
1960 } else {
1961 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1962 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
1963 0xff, 16);
1964 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1965 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
1966 &in6->sin6_addr, 16);
1967 }
1968 }
1969
1970 static void set_ipaddr_spec(const struct sockaddr *addr,
1971 struct mlx5_flow_spec *spec, bool is_dst_ip)
1972 {
1973 struct sockaddr_in6 *in6;
1974
1975 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1976 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1977 outer_headers.ip_version);
1978
1979 if (addr->sa_family == AF_INET) {
1980 struct sockaddr_in *in = (struct sockaddr_in *)addr;
1981
1982 set_ipaddr_spec_v4(in, spec, is_dst_ip);
1983 return;
1984 }
1985
1986 in6 = (struct sockaddr_in6 *)addr;
1987 set_ipaddr_spec_v6(in6, spec, is_dst_ip);
1988 }
1989
1990 static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule)
1991 {
1992 mlx5_del_flow_rules(rx_rule->op);
1993 mlx5_del_flow_rules(rx_rule->ip);
1994 list_del(&rx_rule->entry);
1995 kfree(rx_rule);
1996 }
1997
1998 static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
1999 struct list_head *rx_rules_list)
2000 {
2001 struct mlx5_roce_macsec_rx_rule *rx_rule, *next;
2002
2003 if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
2004 return;
2005
2006 list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) {
2007 if (rx_rule->fs_id == fs_id)
2008 macsec_fs_del_roce_rule_rx(rx_rule);
2009 }
2010 }
2011
2012 static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev,
2013 struct mlx5_roce_macsec_tx_rule *tx_rule)
2014 {
2015 mlx5_del_flow_rules(tx_rule->rule);
2016 mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
2017 list_del(&tx_rule->entry);
2018 kfree(tx_rule);
2019 }
2020
2021 static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
2022 struct list_head *tx_rules_list)
2023 {
2024 struct mlx5_roce_macsec_tx_rule *tx_rule, *next;
2025
2026 if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
2027 return;
2028
2029 list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) {
2030 if (tx_rule->fs_id == fs_id)
2031 macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
2032 }
2033 }
2034
2035 void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats)
2036 {
2037 struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats;
2038 struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
2039 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
2040 struct mlx5_core_dev *mdev = macsec_fs->mdev;
2041
2042 if (tx_tables->check_rule_counter)
2043 mlx5_fc_query(mdev, tx_tables->check_rule_counter,
2044 &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
2045
2046 if (tx_tables->check_miss_rule_counter)
2047 mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
2048 &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
2049
2050 if (rx_tables->check_rule_counter)
2051 mlx5_fc_query(mdev, rx_tables->check_rule_counter,
2052 &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
2053
2054 if (rx_tables->check_miss_rule_counter)
2055 mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
2056 &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
2057 }
2058
2059 struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs)
2060 {
2061 if (!macsec_fs)
2062 return NULL;
2063
2064 return &macsec_fs->stats;
2065 }
2066
2067 u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci)
2068 {
2069 struct mlx5_fs_id *mlx5_fs_id;
2070 u32 fs_id = 0;
2071
2072 rcu_read_lock();
2073 mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci);
2074 if (mlx5_fs_id)
2075 fs_id = mlx5_fs_id->id;
2076 rcu_read_unlock();
2077
2078 return fs_id;
2079 }
2080
2081 union mlx5_macsec_rule *
2082 mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
2083 const struct macsec_context *macsec_ctx,
2084 struct mlx5_macsec_rule_attrs *attrs,
2085 u32 *sa_fs_id)
2086 {
2087 struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs,
2088 .macdev = macsec_ctx->secy->netdev,
2089 .is_tx =
2090 (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
2091 };
2092 union mlx5_macsec_rule *macsec_rule;
2093 u32 tx_new_fs_id;
2094
2095 macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
2096 macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) :
2097 macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
2098
2099 data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id;
2100 if (macsec_rule)
2101 blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
2102 MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
2103 &data);
2104
2105 return macsec_rule;
2106 }
2107
2108 void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
2109 union mlx5_macsec_rule *macsec_rule,
2110 int action, void *macdev, u32 sa_fs_id)
2111 {
2112 struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs,
2113 .macdev = macdev,
2114 .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
2115 };
2116
2117 data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id;
2118 blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
2119 MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
2120 &data);
2121
2122 (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
2123 macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev) :
2124 macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id);
2125 }
2126
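/* RoCE Rx: forward traffic for the destination IP from the RDMA RX IP
 * check table to the MACsec-operation check table, where packets are
 * allowed only if metadata REG_C_5 carries the expected Rx fs_id.
 */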
2127 static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
2128 const struct sockaddr *addr,
2129 struct list_head *rx_rules_list)
2130 {
2131 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
2132 struct mlx5_roce_macsec_rx_rule *rx_rule;
2133 struct mlx5_flow_destination dest = {};
2134 struct mlx5_flow_act flow_act = {};
2135 struct mlx5_flow_handle *new_rule;
2136 struct mlx5_flow_spec *spec;
2137 int err = 0;
2138
2139 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2140 if (!spec)
2141 return -ENOMEM;
2142
2143 rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL);
2144 if (!rx_rule) {
2145 err = -ENOMEM;
2146 goto out;
2147 }
2148
2149 set_ipaddr_spec(addr, spec, true);
2150
2151 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2152 dest.ft = rx_fs->roce.ft_macsec_op_check;
2153 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2154 new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act,
2155 &dest, 1);
2156 if (IS_ERR(new_rule)) {
2157 err = PTR_ERR(new_rule);
2158 goto ip_rule_err;
2159 }
2160 rx_rule->ip = new_rule;
2161
2162 memset(&flow_act, 0, sizeof(flow_act));
2163 memset(spec, 0, sizeof(*spec));
2164
2165 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
2166 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5);
2167 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5,
2168 macsec_fs_set_rx_fs_id(fs_id));
2169 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
2170 new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act,
2171 NULL, 0);
2172 if (IS_ERR(new_rule)) {
2173 err = PTR_ERR(new_rule);
2174 goto op_rule_err;
2175 }
2176 rx_rule->op = new_rule;
2177 rx_rule->gid_idx = gid_idx;
2178 rx_rule->fs_id = fs_id;
2179 list_add_tail(&rx_rule->entry, rx_rules_list);
2180
2181 goto out;
2182
2183 op_rule_err:
2184 mlx5_del_flow_rules(rx_rule->ip);
2185 rx_rule->ip = NULL;
2186 ip_rule_err:
2187 kfree(rx_rule);
2188 out:
2189 kvfree(spec);
2190 return err;
2191 }
2192
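/* RoCE Tx: match the source IP in the RDMA TX table, write the Tx fs_id
 * into metadata REG_A via a modify header and forward to the Tx crypto
 * table for MACsec encryption.
 */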
2193 static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
2194 const struct sockaddr *addr,
2195 struct list_head *tx_rules_list)
2196 {
2197 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2198 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
2199 struct mlx5_core_dev *mdev = macsec_fs->mdev;
2200 struct mlx5_modify_hdr *modify_hdr = NULL;
2201 struct mlx5_roce_macsec_tx_rule *tx_rule;
2202 struct mlx5_flow_destination dest = {};
2203 struct mlx5_flow_act flow_act = {};
2204 struct mlx5_flow_handle *new_rule;
2205 struct mlx5_flow_spec *spec;
2206 int err = 0;
2207
2208 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2209 if (!spec)
2210 return -ENOMEM;
2211
2212 tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL);
2213 if (!tx_rule) {
2214 err = -ENOMEM;
2215 goto out;
2216 }
2217
2218 set_ipaddr_spec(addr, spec, false);
2219
2220 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
2221 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
2222 MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
2223 MLX5_SET(set_action_in, action, offset, 0);
2224 MLX5_SET(set_action_in, action, length, 32);
2225
2226 modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
2227 1, action);
2228 if (IS_ERR(modify_hdr)) {
2229 err = PTR_ERR(modify_hdr);
2230 mlx5_core_err(mdev, "Failed to alloc RoCE MACsec set modify_header_id err=%d\n",
2231 err);
2232 modify_hdr = NULL;
2233 goto modify_hdr_err;
2234 }
2235 tx_rule->meta_modhdr = modify_hdr;
2236
2237 flow_act.modify_hdr = modify_hdr;
2238 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2239
2240 dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
2241 dest.ft = tx_fs->tables.ft_crypto.t;
2242 new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1);
2243 if (IS_ERR(new_rule)) {
2244 err = PTR_ERR(new_rule);
2245 mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err);
2246 goto rule_err;
2247 }
2248 tx_rule->rule = new_rule;
2249 tx_rule->gid_idx = gid_idx;
2250 tx_rule->fs_id = fs_id;
2251 list_add_tail(&tx_rule->entry, tx_rules_list);
2252
2253 goto out;
2254
2255 rule_err:
2256 mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
2257 modify_hdr_err:
2258 kfree(tx_rule);
2259 out:
2260 kvfree(spec);
2261 return err;
2262 }
2263
2264 void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
2265 struct list_head *tx_rules_list, struct list_head *rx_rules_list)
2266 {
2267 struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx;
2268 struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx;
2269
2270 list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) {
2271 if (tx_rule->gid_idx == gid_idx)
2272 macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
2273 }
2274
2275 list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) {
2276 if (rx_rule->gid_idx == gid_idx)
2277 macsec_fs_del_roce_rule_rx(rx_rule);
2278 }
2279 }
2280 EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule);
2281
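/* Install RoCE MACsec steering for every SA (Tx and Rx fs_id) already
 * configured on this MACsec netdev, for the given GID index. On failure,
 * roll back all rules added for that GID.
 */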
2282 int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
2283 struct list_head *tx_rules_list, struct list_head *rx_rules_list,
2284 struct mlx5_macsec_fs *macsec_fs)
2285 {
2286 struct mlx5_macsec_device *iter, *macsec_device = NULL;
2287 struct mlx5_core_dev *mdev = macsec_fs->mdev;
2288 struct mlx5_fs_id *fs_id_iter;
2289 unsigned long index = 0;
2290 int err;
2291
2292 list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
2293 if (iter->macdev == macdev) {
2294 macsec_device = iter;
2295 break;
2296 }
2297 }
2298
2299 if (!macsec_device)
2300 return 0;
2301
2302 xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) {
2303 err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr,
2304 tx_rules_list);
2305 if (err) {
2306 mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
2307 goto out;
2308 }
2309 }
2310
2311 index = 0;
2312 xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) {
2313 err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr,
2314 rx_rules_list);
2315 if (err) {
2316 mlx5_core_err(mdev, "MACsec offload: Failed to add roce RX rule\n");
2317 goto out;
2318 }
2319 }
2320
2321 return 0;
2322 out:
2323 mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list);
2324 return err;
2325 }
2326 EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule);
2327
2328 void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
2329 struct list_head *tx_rules_list,
2330 struct list_head *rx_rules_list,
2331 struct mlx5_macsec_fs *macsec_fs, bool is_tx)
2332 {
2333 (is_tx) ?
2334 mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr,
2335 tx_rules_list) :
2336 mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr,
2337 rx_rules_list);
2338 }
2339 EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules);
2340
2341 void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
2342 struct list_head *tx_rules_list,
2343 struct list_head *rx_rules_list, bool is_tx)
2344 {
2345 (is_tx) ?
2346 macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list) :
2347 macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list);
2348 }
2349 EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules);
2350
2351 void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs)
2352 {
2353 macsec_fs_rx_cleanup(macsec_fs);
2354 macsec_fs_tx_cleanup(macsec_fs);
2355 rhashtable_destroy(&macsec_fs->fs_id_hash);
2356 rhashtable_destroy(&macsec_fs->sci_hash);
2357 kfree(macsec_fs);
2358 }
2359
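/* Allocate the MACsec flow steering context: SCI and fs_id hash tables,
 * Tx and Rx steering state, and the MACsec notifier chain.
 */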
2360 struct mlx5_macsec_fs *
2361 mlx5_macsec_fs_init(struct mlx5_core_dev *mdev)
2362 {
2363 struct mlx5_macsec_fs *macsec_fs;
2364 int err;
2365
2366 macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
2367 if (!macsec_fs)
2368 return NULL;
2369
2370 macsec_fs->mdev = mdev;
2371
2372 err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci);
2373 if (err) {
2374 mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
2375 err);
2376 goto err_hash;
2377 }
2378
2379 err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id);
2380 if (err) {
2381 mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n",
2382 err);
2383 goto sci_hash_cleanup;
2384 }
2385
2386 err = macsec_fs_tx_init(macsec_fs);
2387 if (err) {
2388 mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
2389 goto fs_id_hash_cleanup;
2390 }
2391
2392 err = macsec_fs_rx_init(macsec_fs);
2393 if (err) {
2394 mlx5_core_err(mdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err);
2395 goto tx_cleanup;
2396 }
2397
2398 BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh);
2399
2400 return macsec_fs;
2401
2402 tx_cleanup:
2403 macsec_fs_tx_cleanup(macsec_fs);
2404 fs_id_hash_cleanup:
2405 rhashtable_destroy(&macsec_fs->fs_id_hash);
2406 sci_hash_cleanup:
2407 rhashtable_destroy(&macsec_fs->sci_hash);
2408 err_hash:
2409 kfree(macsec_fs);
2410 return NULL;
2411 }
2412