1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 
8 #include "en.h"
9 #include "lib/aso.h"
10 #include "lib/mlx5.h"
11 #include "en_accel/macsec.h"
12 #include "en_accel/macsec_fs.h"
13 
14 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
15 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
16 
/* Bit flags used to arm event reporting on the MACsec ASO object.
 * Only EPN (extended packet number) arming is defined here.
 */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),
};
20 
/* Offsets into the ASO result when removing a flow.
 * NOTE(review): presumably a dword offset of the packet counter returned
 * by the ASO — confirm against the device PRM.
 */
enum {
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
24 
/* Small cookie tying a HW MACsec object id (and an index) back to the
 * owning driver-level macsec instance.
 */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;
	u32 obj_id;	/* HW MACsec general-object id */
	u8 idx;
};
30 
/* ASO mode selectors used by this driver (see mlx5e_macsec_aso_in.mode).
 * NOTE(review): only the EPN mode value is defined in this chunk.
 */
enum {
	MLX5_MACSEC_EPN,
};
34 
/* Output of an ASO query: which events are armed and the current
 * mode parameter (e.g. next PN / EPN state).
 */
struct mlx5e_macsec_aso_out {
	u8 event_arm;	/* bitmask of enum mlx5_macsec_aso_event_arm */
	u32 mode_param;
};
39 
/* Input for an ASO operation: the mode to operate in and the target
 * MACsec object id.
 */
struct mlx5e_macsec_aso_in {
	u8 mode;	/* e.g. MLX5_MACSEC_EPN */
	u32 obj_id;
};
44 
/* Cached extended-packet-number (XPN) state for an SA. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;	/* upper 32 bits of the 64-bit PN */
	u8 epn_enabled;
	u8 overlap;	/* set when lower PN half crossed the EPN scope midpoint */
};
50 
/* Deferred-work context for handling async MACsec object events
 * (queued on macsec->wq; identifies the object by obj_id).
 */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;
};
57 
/* Driver-side state of a single security association (Tx or Rx). */
struct mlx5e_macsec_sa {
	bool active;
	u8  assoc_num;		/* association number (AN) within the SC */
	u32 macsec_obj_id;	/* HW MACsec general-object id */
	u32 enc_key_id;		/* HW encryption key (DEK) object id */
	u32 next_pn;		/* lower 32 bits of the next packet number */
	sci_t sci;
	salt_t salt;		/* XPN salt (valid when epn_state.epn_enabled) */

	struct rhash_head hash;	/* node in macsec->sci_hash (Tx sci -> SA) */
	u32 fs_id;		/* steering id; Tx: published via sci_hash */
	union mlx5e_macsec_rule *macsec_rule;	/* installed steering rule, NULL if none */
	struct rcu_head rcu_head;	/* for kfree_rcu of Tx SAs */
	struct mlx5e_macsec_epn_state epn_state;
};
73 
struct mlx5e_macsec_rx_sc;
/* Entry in macsec->sc_xarray mapping an allocated fs_id to its Rx SC;
 * fs_id is the xarray index allocated in mlx5e_macsec_add_rxsc().
 */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;
	struct mlx5e_macsec_rx_sc *rx_sc;
};
79 
/* Driver-side state of an Rx secure channel and its SAs. */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];	/* one slot per AN */
	struct list_head rx_sc_list_element;	/* node in device's RCU SC list */
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries the SCI to the stack on Rx */
	struct rcu_head rcu_head;	/* for kfree_rcu in macsec_del_rxsc_ctx */
};
89 
/* DMA-mapped buffer + mkey through which the device reads/writes the
 * macsec ASO context (registered in mlx5e_macsec_aso_reg_mr()).
 */
struct mlx5e_macsec_umr {
	dma_addr_t dma_addr;
	u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	u32 mkey;
};
95 
/* Per-device MACsec ASO resources: the ASO SQ wrapper, the UMR buffer
 * used for ASO context transfers, and the protection domain they use.
 */
struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain number */
};
106 
/* Hashtable parameters for macsec->sci_hash: keyed by the SA's SCI,
 * linked through mlx5e_macsec_sa.hash.
 */
static const struct rhashtable_params rhash_sci = {
	.key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
	.key_offset = offsetof(struct mlx5e_macsec_sa, sci),
	.head_offset = offsetof(struct mlx5e_macsec_sa, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
114 
/* Per-MACsec-netdev context; one entry per offloaded macsec interface,
 * linked on macsec->macsec_device_list_head (RCU list).
 */
struct mlx5e_macsec_device {
	const struct net_device *netdev;	/* the macsec net_device (secy->netdev) */
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];	/* Tx SA per AN */
	struct list_head macsec_rx_sc_list_head;	/* RCU list of Rx SCs */
	unsigned char *dev_addr;	/* copy of the device MAC address */
	struct list_head macsec_device_list_element;
};
122 
/* Top-level MACsec offload state for one mlx5 core device. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;	/* RCU list of offloaded netdevs */
	int num_of_devices;
	struct mlx5e_macsec_fs *macsec_fs;	/* steering (flow) state */
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Tx sci -> fs id mapping handling */
	struct rhashtable sci_hash;      /* sci -> mlx5e_macsec_sa */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* Stats manage */
	struct mlx5e_macsec_stats stats;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;	/* async HW event notifier */
	struct workqueue_struct *wq;	/* queue for deferred event handling */
};
146 
/* Attributes used to create a HW MACsec offload object
 * (consumed by mlx5e_macsec_create_object()).
 */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;		/* PD the ASO accesses memory through */
	u32 next_pn;		/* initial lower-32 packet number */
	__be64 sci;
	u32 enc_key_id;		/* DEK object id */
	bool encrypt;		/* confidentiality on/off */
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;		/* XPN salt (valid when epn enabled) */
	__be32 ssci;		/* XPN short SCI (valid when epn enabled) */
	bool replay_protect;	/* Rx only: enable replay protection */
	u32 replay_window;	/* Rx only: window size in packets */
};
159 
/* Parameters for building an ASO control segment (conditions, offsets
 * and bitwise data/mask applied to the ASO context).
 * NOTE(review): field semantics follow the mlx5 ASO WQE layout — confirm
 * against the ASO segment definition in mlx5_ifc/aso headers.
 */
struct mlx5_aso_ctrl_param {
	u8   data_mask_mode;
	u8   condition_0_operand;
	u8   condition_1_operand;
	u8   condition_0_offset;
	u8   condition_1_offset;
	u8   data_offset;
	u8   condition_operand;
	u32  condition_0_data;
	u32  condition_0_mask;
	u32  condition_1_data;
	u32  condition_1_mask;
	u64  bitwise_data;
	u64  data_mask;
};
175 
176 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
177 {
178 	struct mlx5e_macsec_umr *umr;
179 	struct device *dma_device;
180 	dma_addr_t dma_addr;
181 	int err;
182 
183 	umr = kzalloc(sizeof(*umr), GFP_KERNEL);
184 	if (!umr) {
185 		err = -ENOMEM;
186 		return err;
187 	}
188 
189 	dma_device = mlx5_core_dma_dev(mdev);
190 	dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
191 	err = dma_mapping_error(dma_device, dma_addr);
192 	if (err) {
193 		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
194 		goto out_dma;
195 	}
196 
197 	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
198 	if (err) {
199 		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
200 		goto out_mkey;
201 	}
202 
203 	umr->dma_addr = dma_addr;
204 
205 	aso->umr = umr;
206 
207 	return 0;
208 
209 out_mkey:
210 	dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
211 out_dma:
212 	kfree(umr);
213 	return err;
214 }
215 
216 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
217 {
218 	struct mlx5e_macsec_umr *umr = aso->umr;
219 
220 	mlx5_core_destroy_mkey(mdev, umr->mkey);
221 	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
222 	kfree(umr);
223 }
224 
225 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
226 {
227 	u8 window_sz;
228 
229 	if (!attrs->replay_protect)
230 		return 0;
231 
232 	switch (attrs->replay_window) {
233 	case 256:
234 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
235 		break;
236 	case 128:
237 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
238 		break;
239 	case 64:
240 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
241 		break;
242 	case 32:
243 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
244 		break;
245 	default:
246 		return -EINVAL;
247 	}
248 	MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
249 	MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
250 
251 	return 0;
252 }
253 
/* Create a MACsec offload general object in the device.
 * Fills the macsec_offload_obj (and its embedded macsec_aso context)
 * from @attrs, then issues CREATE_GENERAL_OBJECT.  On success the new
 * object id is returned through @macsec_obj_id.
 *
 * Tx objects run the ASO in INC_SN mode; Rx objects optionally enable
 * replay protection via macsec_set_replay_protection().
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* With XPN the object's sci field carries the SSCI */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* Copy the salt in reversed 32-bit-word order.
		 * NOTE(review): word-swap preserved as-is — confirm layout
		 * against the device PRM.
		 */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
316 
317 static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
318 {
319 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
320 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
321 
322 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
323 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
324 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
325 
326 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
327 }
328 
/* Tear down the offload state of an SA.
 * For Tx SAs the SA is first unpublished from the sci hashtable so the
 * datapath stops resolving it, then the steering rule is removed and the
 * HW object destroyed.  Safe to call when no rule is installed
 * (sa->macsec_rule == NULL); idempotent once the rule is cleared.
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa,
				    bool is_tx)
{
	int action =  (is_tx) ?  MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	if ((is_tx) && sa->fs_id) {
		/* Make sure ongoing datapath readers sees a valid SA */
		rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
		sa->fs_id = 0;
	}

	if (!sa->macsec_rule)
		return;

	/* Remove the steering rule before destroying the HW object it refers to */
	mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
	sa->macsec_rule = NULL;
}
349 
350 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
351 				struct mlx5e_macsec_sa *sa,
352 				bool encrypt,
353 				bool is_tx)
354 {
355 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
356 	struct mlx5e_macsec *macsec = priv->macsec;
357 	struct mlx5_macsec_rule_attrs rule_attrs;
358 	struct mlx5_core_dev *mdev = priv->mdev;
359 	struct mlx5_macsec_obj_attrs obj_attrs;
360 	union mlx5e_macsec_rule *macsec_rule;
361 	struct macsec_key *key;
362 	int err;
363 
364 	obj_attrs.next_pn = sa->next_pn;
365 	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
366 	obj_attrs.enc_key_id = sa->enc_key_id;
367 	obj_attrs.encrypt = encrypt;
368 	obj_attrs.aso_pdn = macsec->aso.pdn;
369 	obj_attrs.epn_state = sa->epn_state;
370 
371 	key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
372 
373 	if (sa->epn_state.epn_enabled) {
374 		obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
375 					   cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
376 
377 		memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
378 	}
379 
380 	obj_attrs.replay_window = ctx->secy->replay_window;
381 	obj_attrs.replay_protect = ctx->secy->replay_protect;
382 
383 	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
384 	if (err)
385 		return err;
386 
387 	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
388 	rule_attrs.sci = sa->sci;
389 	rule_attrs.assoc_num = sa->assoc_num;
390 	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
391 				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
392 
393 	macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
394 	if (!macsec_rule) {
395 		err = -ENOMEM;
396 		goto destroy_macsec_object;
397 	}
398 
399 	sa->macsec_rule = macsec_rule;
400 
401 	if (is_tx) {
402 		err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
403 		if (err)
404 			goto destroy_macsec_object_and_rule;
405 	}
406 
407 	return 0;
408 
409 destroy_macsec_object_and_rule:
410 	mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
411 destroy_macsec_object:
412 	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
413 
414 	return err;
415 }
416 
417 static struct mlx5e_macsec_rx_sc *
418 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
419 {
420 	struct mlx5e_macsec_rx_sc *iter;
421 
422 	list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
423 		if (iter->sci == sci)
424 			return iter;
425 	}
426 
427 	return NULL;
428 }
429 
430 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
431 				      struct mlx5e_macsec_sa *rx_sa,
432 				      bool active)
433 {
434 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
435 	struct mlx5e_macsec *macsec = priv->macsec;
436 	int err = 0;
437 
438 	if (rx_sa->active == active)
439 		return 0;
440 
441 	rx_sa->active = active;
442 	if (!active) {
443 		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
444 		return 0;
445 	}
446 
447 	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
448 	if (err)
449 		rx_sa->active = false;
450 
451 	return err;
452 }
453 
454 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
455 {
456 	const struct net_device *netdev = ctx->netdev;
457 	const struct macsec_secy *secy = ctx->secy;
458 
459 	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
460 		netdev_err(netdev,
461 			   "MACsec offload is supported only when validate_frame is in strict mode\n");
462 		return false;
463 	}
464 
465 	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
466 		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
467 			   MACSEC_DEFAULT_ICV_LEN);
468 		return false;
469 	}
470 
471 	if (!secy->protect_frames) {
472 		netdev_err(netdev,
473 			   "MACsec offload is supported only when protect_frames is set\n");
474 		return false;
475 	}
476 
477 	if (!ctx->secy->tx_sc.encrypt) {
478 		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
479 		return false;
480 	}
481 
482 	return true;
483 }
484 
485 static struct mlx5e_macsec_device *
486 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
487 				       const struct macsec_context *ctx)
488 {
489 	struct mlx5e_macsec_device *iter;
490 	const struct list_head *list;
491 
492 	list = &macsec->macsec_device_list_head;
493 	list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
494 		if (iter->netdev == ctx->secy->netdev)
495 			return iter;
496 	}
497 
498 	return NULL;
499 }
500 
501 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
502 			      const pn_t *next_pn_halves)
503 {
504 	struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
505 
506 	sa->salt = key->salt;
507 	epn_state->epn_enabled = 1;
508 	epn_state->epn_msb = next_pn_halves->upper;
509 	epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
510 }
511 
512 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
513 {
514 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
515 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
516 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
517 	const struct macsec_secy *secy = ctx->secy;
518 	struct mlx5e_macsec_device *macsec_device;
519 	struct mlx5_core_dev *mdev = priv->mdev;
520 	u8 assoc_num = ctx->sa.assoc_num;
521 	struct mlx5e_macsec_sa *tx_sa;
522 	struct mlx5e_macsec *macsec;
523 	int err = 0;
524 
525 	mutex_lock(&priv->macsec->lock);
526 
527 	macsec = priv->macsec;
528 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
529 	if (!macsec_device) {
530 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
531 		err = -EEXIST;
532 		goto out;
533 	}
534 
535 	if (macsec_device->tx_sa[assoc_num]) {
536 		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
537 		err = -EEXIST;
538 		goto out;
539 	}
540 
541 	tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
542 	if (!tx_sa) {
543 		err = -ENOMEM;
544 		goto out;
545 	}
546 
547 	tx_sa->active = ctx_tx_sa->active;
548 	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
549 	tx_sa->sci = secy->sci;
550 	tx_sa->assoc_num = assoc_num;
551 
552 	if (secy->xpn)
553 		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
554 
555 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
556 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
557 					 &tx_sa->enc_key_id);
558 	if (err)
559 		goto destroy_sa;
560 
561 	macsec_device->tx_sa[assoc_num] = tx_sa;
562 	if (!secy->operational ||
563 	    assoc_num != tx_sc->encoding_sa ||
564 	    !tx_sa->active)
565 		goto out;
566 
567 	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
568 	if (err)
569 		goto destroy_encryption_key;
570 
571 	mutex_unlock(&macsec->lock);
572 
573 	return 0;
574 
575 destroy_encryption_key:
576 	macsec_device->tx_sa[assoc_num] = NULL;
577 	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
578 destroy_sa:
579 	kfree(tx_sa);
580 out:
581 	mutex_unlock(&macsec->lock);
582 
583 	return err;
584 }
585 
586 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
587 {
588 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
589 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
590 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
591 	struct mlx5e_macsec_device *macsec_device;
592 	u8 assoc_num = ctx->sa.assoc_num;
593 	struct mlx5e_macsec_sa *tx_sa;
594 	struct mlx5e_macsec *macsec;
595 	struct net_device *netdev;
596 	int err = 0;
597 
598 	mutex_lock(&priv->macsec->lock);
599 
600 	macsec = priv->macsec;
601 	netdev = ctx->netdev;
602 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
603 	if (!macsec_device) {
604 		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
605 		err = -EINVAL;
606 		goto out;
607 	}
608 
609 	tx_sa = macsec_device->tx_sa[assoc_num];
610 	if (!tx_sa) {
611 		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
612 		err = -EEXIST;
613 		goto out;
614 	}
615 
616 	if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
617 		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
618 			   assoc_num);
619 		err = -EINVAL;
620 		goto out;
621 	}
622 
623 	if (tx_sa->active == ctx_tx_sa->active)
624 		goto out;
625 
626 	tx_sa->active = ctx_tx_sa->active;
627 	if (tx_sa->assoc_num != tx_sc->encoding_sa)
628 		goto out;
629 
630 	if (ctx_tx_sa->active) {
631 		err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
632 		if (err)
633 			goto out;
634 	} else {
635 		if (!tx_sa->macsec_rule) {
636 			err = -EINVAL;
637 			goto out;
638 		}
639 
640 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
641 	}
642 out:
643 	mutex_unlock(&macsec->lock);
644 
645 	return err;
646 }
647 
648 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
649 {
650 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
651 	struct mlx5e_macsec_device *macsec_device;
652 	u8 assoc_num = ctx->sa.assoc_num;
653 	struct mlx5e_macsec_sa *tx_sa;
654 	struct mlx5e_macsec *macsec;
655 	int err = 0;
656 
657 	mutex_lock(&priv->macsec->lock);
658 	macsec = priv->macsec;
659 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
660 	if (!macsec_device) {
661 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
662 		err = -EINVAL;
663 		goto out;
664 	}
665 
666 	tx_sa = macsec_device->tx_sa[assoc_num];
667 	if (!tx_sa) {
668 		netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
669 		err = -EEXIST;
670 		goto out;
671 	}
672 
673 	mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
674 	mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
675 	kfree_rcu(tx_sa);
676 	macsec_device->tx_sa[assoc_num] = NULL;
677 
678 out:
679 	mutex_unlock(&macsec->lock);
680 
681 	return err;
682 }
683 
684 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
685 {
686 	struct mlx5e_macsec_sa *macsec_sa;
687 	u32 fs_id = 0;
688 
689 	rcu_read_lock();
690 	macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
691 	if (macsec_sa)
692 		fs_id = macsec_sa->fs_id;
693 	rcu_read_unlock();
694 
695 	return fs_id;
696 }
697 
698 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
699 {
700 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
701 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
702 	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
703 	struct mlx5e_macsec_device *macsec_device;
704 	struct mlx5e_macsec_rx_sc *rx_sc;
705 	struct list_head *rx_sc_list;
706 	struct mlx5e_macsec *macsec;
707 	int err = 0;
708 
709 	mutex_lock(&priv->macsec->lock);
710 	macsec = priv->macsec;
711 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
712 	if (!macsec_device) {
713 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
714 		err = -EINVAL;
715 		goto out;
716 	}
717 
718 	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
719 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
720 	if (rx_sc) {
721 		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
722 			   ctx_rx_sc->sci);
723 		err = -EEXIST;
724 		goto out;
725 	}
726 
727 	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
728 	if (!rx_sc) {
729 		err = -ENOMEM;
730 		goto out;
731 	}
732 
733 	sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
734 	if (!sc_xarray_element) {
735 		err = -ENOMEM;
736 		goto destroy_rx_sc;
737 	}
738 
739 	sc_xarray_element->rx_sc = rx_sc;
740 	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
741 		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
742 	if (err) {
743 		if (err == -EBUSY)
744 			netdev_err(ctx->netdev,
745 				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
746 				   MLX5_MACEC_RX_FS_ID_MAX);
747 		goto destroy_sc_xarray_elemenet;
748 	}
749 
750 	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
751 	if (!rx_sc->md_dst) {
752 		err = -ENOMEM;
753 		goto erase_xa_alloc;
754 	}
755 
756 	rx_sc->sci = ctx_rx_sc->sci;
757 	rx_sc->active = ctx_rx_sc->active;
758 	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
759 
760 	rx_sc->sc_xarray_element = sc_xarray_element;
761 	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
762 	mutex_unlock(&macsec->lock);
763 
764 	return 0;
765 
766 erase_xa_alloc:
767 	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
768 destroy_sc_xarray_elemenet:
769 	kfree(sc_xarray_element);
770 destroy_rx_sc:
771 	kfree(rx_sc);
772 
773 out:
774 	mutex_unlock(&macsec->lock);
775 
776 	return err;
777 }
778 
779 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
780 {
781 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
782 	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
783 	struct mlx5e_macsec_device *macsec_device;
784 	struct mlx5e_macsec_rx_sc *rx_sc;
785 	struct mlx5e_macsec_sa *rx_sa;
786 	struct mlx5e_macsec *macsec;
787 	struct list_head *list;
788 	int i;
789 	int err = 0;
790 
791 	mutex_lock(&priv->macsec->lock);
792 
793 	macsec = priv->macsec;
794 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
795 	if (!macsec_device) {
796 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
797 		err = -EINVAL;
798 		goto out;
799 	}
800 
801 	list = &macsec_device->macsec_rx_sc_list_head;
802 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
803 	if (!rx_sc) {
804 		err = -EINVAL;
805 		goto out;
806 	}
807 
808 	if (rx_sc->active == ctx_rx_sc->active)
809 		goto out;
810 
811 	rx_sc->active = ctx_rx_sc->active;
812 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
813 		rx_sa = rx_sc->rx_sa[i];
814 		if (!rx_sa)
815 			continue;
816 
817 		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
818 		if (err)
819 			goto out;
820 	}
821 
822 out:
823 	mutex_unlock(&macsec->lock);
824 
825 	return err;
826 }
827 
/* Destroy an Rx SC and all of its SAs.
 * Offload state and DEKs are removed per-SA first; the SC is then
 * unpublished from the RCU list and the xarray before its memory is
 * RCU-freed.  The ordering below is load-bearing — see the comment.
 */
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rule already removed at
	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
	 * Rx related data propagating using xa_erase which uses rcu to sync,
	 * once fs_id is erased then this rx_sc is hidden from datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	kfree_rcu(rx_sc);
}
856 
857 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
858 {
859 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
860 	struct mlx5e_macsec_device *macsec_device;
861 	struct mlx5e_macsec_rx_sc *rx_sc;
862 	struct mlx5e_macsec *macsec;
863 	struct list_head *list;
864 	int err = 0;
865 
866 	mutex_lock(&priv->macsec->lock);
867 
868 	macsec = priv->macsec;
869 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
870 	if (!macsec_device) {
871 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
872 		err = -EINVAL;
873 		goto out;
874 	}
875 
876 	list = &macsec_device->macsec_rx_sc_list_head;
877 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
878 	if (!rx_sc) {
879 		netdev_err(ctx->netdev,
880 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
881 			   ctx->sa.rx_sa->sc->sci);
882 		err = -EINVAL;
883 		goto out;
884 	}
885 
886 	macsec_del_rxsc_ctx(macsec, rx_sc);
887 out:
888 	mutex_unlock(&macsec->lock);
889 
890 	return err;
891 }
892 
893 static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
894 {
895 	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
896 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
897 	struct mlx5e_macsec_device *macsec_device;
898 	struct mlx5_core_dev *mdev = priv->mdev;
899 	u8 assoc_num = ctx->sa.assoc_num;
900 	struct mlx5e_macsec_rx_sc *rx_sc;
901 	sci_t sci = ctx_rx_sa->sc->sci;
902 	struct mlx5e_macsec_sa *rx_sa;
903 	struct mlx5e_macsec *macsec;
904 	struct list_head *list;
905 	int err = 0;
906 
907 	mutex_lock(&priv->macsec->lock);
908 
909 	macsec = priv->macsec;
910 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
911 	if (!macsec_device) {
912 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
913 		err = -EINVAL;
914 		goto out;
915 	}
916 
917 	list = &macsec_device->macsec_rx_sc_list_head;
918 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
919 	if (!rx_sc) {
920 		netdev_err(ctx->netdev,
921 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
922 			   ctx->sa.rx_sa->sc->sci);
923 		err = -EINVAL;
924 		goto out;
925 	}
926 
927 	if (rx_sc->rx_sa[assoc_num]) {
928 		netdev_err(ctx->netdev,
929 			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
930 			   sci, assoc_num);
931 		err = -EEXIST;
932 		goto out;
933 	}
934 
935 	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
936 	if (!rx_sa) {
937 		err = -ENOMEM;
938 		goto out;
939 	}
940 
941 	rx_sa->active = ctx_rx_sa->active;
942 	rx_sa->next_pn = ctx_rx_sa->next_pn;
943 	rx_sa->sci = sci;
944 	rx_sa->assoc_num = assoc_num;
945 	rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
946 
947 	if (ctx->secy->xpn)
948 		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
949 
950 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
951 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
952 					 &rx_sa->enc_key_id);
953 	if (err)
954 		goto destroy_sa;
955 
956 	rx_sc->rx_sa[assoc_num] = rx_sa;
957 	if (!rx_sa->active)
958 		goto out;
959 
960 	//TODO - add support for both authentication and encryption flows
961 	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
962 	if (err)
963 		goto destroy_encryption_key;
964 
965 	goto out;
966 
967 destroy_encryption_key:
968 	rx_sc->rx_sa[assoc_num] = NULL;
969 	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
970 destroy_sa:
971 	kfree(rx_sa);
972 out:
973 	mutex_unlock(&macsec->lock);
974 
975 	return err;
976 }
977 
978 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
979 {
980 	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
981 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
982 	struct mlx5e_macsec_device *macsec_device;
983 	u8 assoc_num = ctx->sa.assoc_num;
984 	struct mlx5e_macsec_rx_sc *rx_sc;
985 	sci_t sci = ctx_rx_sa->sc->sci;
986 	struct mlx5e_macsec_sa *rx_sa;
987 	struct mlx5e_macsec *macsec;
988 	struct list_head *list;
989 	int err = 0;
990 
991 	mutex_lock(&priv->macsec->lock);
992 
993 	macsec = priv->macsec;
994 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
995 	if (!macsec_device) {
996 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
997 		err = -EINVAL;
998 		goto out;
999 	}
1000 
1001 	list = &macsec_device->macsec_rx_sc_list_head;
1002 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1003 	if (!rx_sc) {
1004 		netdev_err(ctx->netdev,
1005 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
1006 			   ctx->sa.rx_sa->sc->sci);
1007 		err = -EINVAL;
1008 		goto out;
1009 	}
1010 
1011 	rx_sa = rx_sc->rx_sa[assoc_num];
1012 	if (!rx_sa) {
1013 		netdev_err(ctx->netdev,
1014 			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1015 			   sci, assoc_num);
1016 		err = -EINVAL;
1017 		goto out;
1018 	}
1019 
1020 	if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
1021 		netdev_err(ctx->netdev,
1022 			   "MACsec offload update RX sa %d PN isn't supported\n",
1023 			   assoc_num);
1024 		err = -EINVAL;
1025 		goto out;
1026 	}
1027 
1028 	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
1029 out:
1030 	mutex_unlock(&macsec->lock);
1031 
1032 	return err;
1033 }
1034 
1035 static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
1036 {
1037 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1038 	struct mlx5e_macsec_device *macsec_device;
1039 	sci_t sci = ctx->sa.rx_sa->sc->sci;
1040 	struct mlx5e_macsec_rx_sc *rx_sc;
1041 	u8 assoc_num = ctx->sa.assoc_num;
1042 	struct mlx5e_macsec_sa *rx_sa;
1043 	struct mlx5e_macsec *macsec;
1044 	struct list_head *list;
1045 	int err = 0;
1046 
1047 	mutex_lock(&priv->macsec->lock);
1048 
1049 	macsec = priv->macsec;
1050 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1051 	if (!macsec_device) {
1052 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1053 		err = -EINVAL;
1054 		goto out;
1055 	}
1056 
1057 	list = &macsec_device->macsec_rx_sc_list_head;
1058 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1059 	if (!rx_sc) {
1060 		netdev_err(ctx->netdev,
1061 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
1062 			   ctx->sa.rx_sa->sc->sci);
1063 		err = -EINVAL;
1064 		goto out;
1065 	}
1066 
1067 	rx_sa = rx_sc->rx_sa[assoc_num];
1068 	if (!rx_sa) {
1069 		netdev_err(ctx->netdev,
1070 			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1071 			   sci, assoc_num);
1072 		err = -EINVAL;
1073 		goto out;
1074 	}
1075 
1076 	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
1077 	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
1078 	kfree(rx_sa);
1079 	rx_sc->rx_sa[assoc_num] = NULL;
1080 
1081 out:
1082 	mutex_unlock(&macsec->lock);
1083 
1084 	return err;
1085 }
1086 
/* Register a new MACsec net_device for offload: validate the SecY
 * configuration, enforce the supported-interface limit, and publish a
 * per-device context on the RCU device list.
 */
static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	const struct net_device *netdev = ctx->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec *macsec;
	int err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
		/* NOTE(review): an error is logged but err stays 0, so this
		 * path reports success to the caller — confirm whether
		 * -EEXIST is intended here.
		 */
		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
		goto out;
	}

	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
		err = -EBUSY;
		goto out;
	}

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device) {
		err = -ENOMEM;
		goto out;
	}

	/* Keep a private copy of the MAC address for later comparison/update */
	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
	if (!macsec_device->dev_addr) {
		kfree(macsec_device);
		err = -ENOMEM;
		goto out;
	}

	macsec_device->netdev = dev;

	/* Initialize fully before publishing on the RCU list */
	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);

	++macsec->num_of_devices;
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1137 
/* Rebuild the Rx offload state after the SecY netdev MAC address changed.
 * First pass tears down every installed Rx SA rule; second pass re-creates
 * rules for the SAs that are still active. Only after all rules are rebuilt
 * is the cached dev_addr snapshot refreshed — on error the stale snapshot is
 * kept so a later retry still detects the address change.
 * Called with macsec->lock held by mlx5e_macsec_upd_secy().
 * Returns 0 on success or the error from mlx5e_macsec_init_sa().
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	/* Pass 1: remove every existing Rx rule (false == Rx direction). */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
		}
	}

	/* Pass 2: re-install rules for SAs that are still active. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
				if (err)
					goto out;
			}
		}
	}

	/* All rules rebuilt — remember the new address. */
	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1179 
1180 /* this function is called from 2 macsec ops functions:
1181  *  macsec_set_mac_address – MAC address was changed, therefore we need to destroy
1182  *  and create new Tx contexts(macsec object + steering).
1183  *  macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to
1184  *  destroy Tx and Rx contexts(macsec object + steering)
1185  */
1186 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1187 {
1188 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1189 	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
1190 	const struct net_device *dev = ctx->secy->netdev;
1191 	struct mlx5e_macsec_device *macsec_device;
1192 	struct mlx5e_macsec_sa *tx_sa;
1193 	struct mlx5e_macsec *macsec;
1194 	int i, err = 0;
1195 
1196 	if (!mlx5e_macsec_secy_features_validate(ctx))
1197 		return -EINVAL;
1198 
1199 	mutex_lock(&priv->macsec->lock);
1200 
1201 	macsec = priv->macsec;
1202 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1203 	if (!macsec_device) {
1204 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1205 		err = -EINVAL;
1206 		goto out;
1207 	}
1208 
1209 	/* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */
1210 	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1211 		err = macsec_upd_secy_hw_address(ctx, macsec_device);
1212 		if (err)
1213 			goto out;
1214 	}
1215 
1216 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1217 		tx_sa = macsec_device->tx_sa[i];
1218 		if (!tx_sa)
1219 			continue;
1220 
1221 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1222 	}
1223 
1224 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1225 		tx_sa = macsec_device->tx_sa[i];
1226 		if (!tx_sa)
1227 			continue;
1228 
1229 		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1230 			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
1231 			if (err)
1232 				goto out;
1233 		}
1234 	}
1235 
1236 out:
1237 	mutex_unlock(&macsec->lock);
1238 
1239 	return err;
1240 }
1241 
/* macsec ops: remove a SecY and all of its offload state.
 * Destroys every Tx SA (offload state + DEK), every Rx SC with its SAs, then
 * frees the cached MAC address and unlinks/frees the per-netdev context.
 * Returns 0 on success or -EINVAL if the device context is not found.
 */
static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;
	int i;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;

		goto out;
	}

	/* Release all Tx SAs: offload state first (true == Tx direction),
	 * then the DEK each SA references, then the SW object.
	 */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
		kfree(tx_sa);
		macsec_device->tx_sa[i] = NULL;
	}

	/* Release all Rx SCs (each SC owns its Rx SAs). */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
		macsec_del_rxsc_ctx(macsec, rx_sc);

	kfree(macsec_device->dev_addr);
	macsec_device->dev_addr = NULL;

	list_del_rcu(&macsec_device->macsec_device_list_element);
	--macsec->num_of_devices;
	kfree(macsec_device);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1290 
1291 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1292 				     struct mlx5_macsec_obj_attrs *attrs)
1293 {
1294 	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1295 	attrs->epn_state.overlap = sa->epn_state.overlap;
1296 }
1297 
/* Fill the ASO control segment of a WQE.
 * Always programs the UMR buffer address/key with the read-enable bit so the
 * ASO context is copied back to aso->umr->ctx. When @param is NULL this plain
 * read is all that happens; otherwise the conditional-operation fields are
 * packed in as well.
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	/* Low address dword doubles as the read-enable flag carrier. */
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	/* Each byte below packs two sub-fields; the shifts place the second
	 * sub-field in the high bits as the HW layout expects.
	 */
	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1326 
/* Modify the EPN fields (epn_msb, epn_overlap) of an existing MACsec object.
 * Queries the object first to read modify_field_select and verify the EPN
 * fields are modifiable, then reuses the same @in buffer (with the opcode
 * switched to MODIFY) to issue the modification.
 * Returns 0 on success, -EOPNOTSUPP if the fields are not modifiable, or the
 * firmware command error.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	/* Build the modify payload in-place on top of the query header. */
	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1369 
1370 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1371 				  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1372 				  struct mlx5e_macsec_aso_in *in)
1373 {
1374 	struct mlx5_aso_ctrl_param param = {};
1375 
1376 	param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1377 	param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1378 	param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1379 	if (in->mode == MLX5_MACSEC_EPN) {
1380 		param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1381 		param.bitwise_data = BIT_ULL(54);
1382 		param.data_mask = param.bitwise_data;
1383 	}
1384 	macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
1385 }
1386 
/* Post a synchronous ASO WQE that re-arms the event described by @in on the
 * MACsec object in->obj_id. The whole get-wqe/build/post/poll sequence runs
 * under aso_lock since the ASO SQ/CQ are shared.
 * Returns the mlx5_aso_poll_cq() result.
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}
1409 
/* Read the ASO context of MACsec object in->obj_id into @out.
 * Posts a plain-read WQE (NULL ctrl param => only ASO_CTRL_READ_EN is set),
 * waits for completion, then parses the EPN arm bit and mode_parameter from
 * the DMA'd context in aso->umr->ctx. Runs under aso_lock because the ASO
 * SQ/CQ and the UMR buffer are shared.
 * Returns the mlx5_aso_poll_cq() result; @out is only valid on success.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	if (err)
		goto err_out;

	/* The WQE's read-enable DMA'd the context into aso->umr->ctx. */
	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1442 
1443 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1444 							    const u32 obj_id)
1445 {
1446 	const struct list_head *device_list;
1447 	struct mlx5e_macsec_sa *macsec_sa;
1448 	struct mlx5e_macsec_device *iter;
1449 	int i;
1450 
1451 	device_list = &macsec->macsec_device_list_head;
1452 
1453 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1454 		for (i = 0; i < MACSEC_NUM_AN; ++i) {
1455 			macsec_sa = iter->tx_sa[i];
1456 			if (!macsec_sa || !macsec_sa->active)
1457 				continue;
1458 			if (macsec_sa->macsec_obj_id == obj_id)
1459 				return macsec_sa;
1460 		}
1461 	}
1462 
1463 	return NULL;
1464 }
1465 
1466 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1467 							    const u32 obj_id)
1468 {
1469 	const struct list_head *device_list, *sc_list;
1470 	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1471 	struct mlx5e_macsec_sa *macsec_sa;
1472 	struct mlx5e_macsec_device *iter;
1473 	int i;
1474 
1475 	device_list = &macsec->macsec_device_list_head;
1476 
1477 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1478 		sc_list = &iter->macsec_rx_sc_list_head;
1479 		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1480 			for (i = 0; i < MACSEC_NUM_AN; ++i) {
1481 				macsec_sa = mlx5e_rx_sc->rx_sa[i];
1482 				if (!macsec_sa || !macsec_sa->active)
1483 					continue;
1484 				if (macsec_sa->macsec_obj_id == obj_id)
1485 					return macsec_sa;
1486 			}
1487 		}
1488 	}
1489 
1490 	return NULL;
1491 }
1492 
/* Advance the SA's extended-packet-number (EPN) window state after an EPN
 * event fired, push the new epn_msb/overlap to the HW object, and re-arm the
 * EPN event so the next window crossing is reported.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	macsec_build_accel_attrs(sa, &attrs);
	/* NOTE(review): return value ignored — presumably best-effort from the
	 * async worker with no caller to propagate to; confirm this is intended.
	 */
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1522 
/* Workqueue handler for a MACsec object-change event.
 * Resolves the SA (Tx first, then Rx) that owns the signalled object id,
 * reads the object's ASO context, and — when EPN is enabled and the arm bit
 * has been consumed by HW — advances the EPN state and re-arms the event.
 * Frees the one-shot work item allocated by macsec_obj_change_event().
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	/* Hold the lock so the SA cannot be freed while we use it. */
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* SA may already have been deleted — not an error. */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case: arm bit cleared means the EPN event fired. */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1560 
1561 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1562 {
1563 	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1564 	struct mlx5e_macsec_async_work *async_work;
1565 	struct mlx5_eqe_obj_change *obj_change;
1566 	struct mlx5_eqe *eqe = data;
1567 	u16 obj_type;
1568 	u32 obj_id;
1569 
1570 	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1571 		return NOTIFY_DONE;
1572 
1573 	obj_change = &eqe->data.obj_change;
1574 	obj_type = be16_to_cpu(obj_change->obj_type);
1575 	obj_id = be32_to_cpu(obj_change->obj_id);
1576 
1577 	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1578 		return NOTIFY_DONE;
1579 
1580 	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1581 	if (!async_work)
1582 		return NOTIFY_DONE;
1583 
1584 	async_work->macsec = macsec;
1585 	async_work->mdev = macsec->mdev;
1586 	async_work->obj_id = obj_id;
1587 
1588 	INIT_WORK(&async_work->work, macsec_async_event);
1589 
1590 	WARN_ON(!queue_work(macsec->wq, &async_work->work));
1591 
1592 	return NOTIFY_OK;
1593 }
1594 
/* Set up the MACsec ASO machinery: allocate a PD, create the ASO SQ/CQ pair,
 * and register the UMR memory region the ASO context is DMA'd into.
 * On failure each step is unwound in reverse order via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	struct mlx5_aso *maso;
	int err;

	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
			      err);
		return err;
	}

	maso = mlx5_aso_create(mdev, aso->pdn);
	if (IS_ERR(maso)) {
		err = PTR_ERR(maso);
		goto err_aso;
	}

	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
	if (err)
		goto err_aso_reg;

	/* Protects the shared ASO SQ/CQ and UMR buffer. */
	mutex_init(&aso->aso_lock);

	aso->maso = maso;

	return 0;

err_aso_reg:
	mlx5_aso_destroy(maso);
err_aso:
	mlx5_core_dealloc_pd(mdev, aso->pdn);
	return err;
}
1630 
1631 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1632 {
1633 	if (!aso)
1634 		return;
1635 
1636 	mlx5e_macsec_aso_dereg_mr(mdev, aso);
1637 
1638 	mlx5_aso_destroy(aso->maso);
1639 
1640 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1641 }
1642 
1643 bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
1644 {
1645 	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
1646 	    MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
1647 		return false;
1648 
1649 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
1650 		return false;
1651 
1652 	if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
1653 		return false;
1654 
1655 	if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
1656 	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
1657 		return false;
1658 
1659 	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
1660 	    !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
1661 		return false;
1662 
1663 	if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
1664 	    !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
1665 		return false;
1666 
1667 	if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
1668 	    !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
1669 		return false;
1670 
1671 	return true;
1672 }
1673 
/* Fill @macsec_stats from the steering layer's counters. */
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
{
	mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
}
1678 
1679 struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
1680 {
1681 	if (!macsec)
1682 		return NULL;
1683 
1684 	return &macsec->stats;
1685 }
1686 
/* MACsec offload callbacks installed on the netdev by
 * mlx5e_macsec_build_netdev(); invoked by the macsec core for SA/SC/SecY
 * add/update/delete operations.
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
};
1701 
1702 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1703 {
1704 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1705 	u32 fs_id;
1706 
1707 	fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1708 	if (!fs_id)
1709 		goto err_out;
1710 
1711 	return true;
1712 
1713 err_out:
1714 	dev_kfree_skb_any(skb);
1715 	return false;
1716 }
1717 
1718 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1719 				struct sk_buff *skb,
1720 				struct mlx5_wqe_eth_seg *eseg)
1721 {
1722 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1723 	u32 fs_id;
1724 
1725 	fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1726 	if (!fs_id)
1727 		return;
1728 
1729 	eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1730 }
1731 
1732 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1733 					struct sk_buff *skb,
1734 					struct mlx5_cqe64 *cqe)
1735 {
1736 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1737 	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1738 	struct mlx5e_priv *priv = netdev_priv(netdev);
1739 	struct mlx5e_macsec_rx_sc *rx_sc;
1740 	struct mlx5e_macsec *macsec;
1741 	u32  fs_id;
1742 
1743 	macsec = priv->macsec;
1744 	if (!macsec)
1745 		return;
1746 
1747 	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1748 
1749 	rcu_read_lock();
1750 	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1751 	rx_sc = sc_xarray_element->rx_sc;
1752 	if (rx_sc) {
1753 		dst_hold(&rx_sc->md_dst->dst);
1754 		skb_dst_set(skb, &rx_sc->md_dst->dst);
1755 	}
1756 
1757 	rcu_read_unlock();
1758 }
1759 
/* Advertise MACsec offload on the netdev when the device supports it:
 * install the offload ops, set NETIF_F_HW_MACSEC, and keep skb dsts (the Rx
 * path attaches metadata dsts in mlx5e_macsec_offload_handle_rx_skb()).
 */
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	if (!mlx5e_is_macsec_device(priv->mdev))
		return;

	/* Enable MACsec */
	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
	netdev->macsec_ops = &macsec_offload_ops;
	netdev->features |= NETIF_F_HW_MACSEC;
	netif_keep_dst(netdev);
}
1773 
/* Initialize the per-netdev MACsec offload context: SCI hash table, ASO
 * machinery, ordered event workqueue, Rx SC xarray, steering tables, and the
 * firmware object-change notifier. On any failure, everything already set up
 * is unwound in reverse order via the goto chain and priv->macsec stays NULL.
 * Returns 0 (also when the device has no MACsec support) or a negative errno.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5e_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	/* Maps SCI -> fs_id for the Tx datapath lookups. */
	err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
			      err);
		goto err_hash;
	}

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* Ordered queue: object-change events are handled one at a time. */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	/* XA_FLAGS_ALLOC1: fs_ids start at 1, so 0 can mean "no entry". */
	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	macsec->macsec_fs = macsec_fs;

	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	rhashtable_destroy(&macsec->sci_hash);
err_hash:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}
1844 
/* Tear down the MACsec offload context in reverse order of
 * mlx5e_macsec_init(): unregister the notifier first so no new events are
 * queued, then flush/destroy the workqueue before freeing what the worker
 * touches. No-op when MACsec was never initialized.
 */
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	rhashtable_destroy(&macsec->sci_hash);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}
1861