1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 #include <linux/if_vlan.h>
8 
9 #include "en.h"
10 #include "lib/aso.h"
11 #include "lib/crypto.h"
12 #include "en_accel/macsec.h"
13 #include "en_accel/macsec_fs.h"
14 
15 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
16 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
17 
/* Events that the MACsec ASO object can arm; currently only the EPN
 * window-crossing event.
 */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),
};

enum {
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

/* Identifies a single MACsec offload object when issuing ASO commands
 * against it.
 */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;
	u32 obj_id;	/* firmware MACsec object id */
	u8 idx;
};

enum {
	MLX5_MACSEC_EPN,
};

/* Result of an ASO query: which events are armed plus the current mode
 * parameter reported by hardware.
 */
struct mlx5e_macsec_aso_out {
	u8 event_arm;
	u32 mode_param;
};

/* Input for an ASO command targeting one MACsec object. */
struct mlx5e_macsec_aso_in {
	u8 mode;
	u32 obj_id;
};

/* Extended packet numbering (XPN) state mirrored into the hardware object. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;	/* upper 32 bits of the 64-bit packet number */
	u8 epn_enabled;
	u8 overlap;	/* set once the lower PN half passed the mid-scope point */
};

/* Deferred work item: firmware object events arrive in atomic (notifier)
 * context and are handled from this work on macsec->wq.
 */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;	/* object the event refers to */
};
58 
/* Driver-side state of a single security association (Tx or Rx). */
struct mlx5e_macsec_sa {
	bool active;
	u8  assoc_num;
	u32 macsec_obj_id;	/* firmware MACsec object backing this SA */
	u32 enc_key_id;		/* DEK id of the encryption key */
	u32 next_pn;
	sci_t sci;
	ssci_t ssci;		/* valid only when XPN is enabled */
	salt_t salt;		/* valid only when XPN is enabled */

	struct rhash_head hash;	/* node in macsec->sci_hash (Tx SAs only) */
	u32 fs_id;		/* steering id; non-zero only while offloaded */
	union mlx5e_macsec_rule *macsec_rule;
	struct rcu_head rcu_head;
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
/* Entry stored in macsec->sc_xarray mapping a steering fs_id back to
 * its Rx security channel.  Erasing the entry (RCU-synchronized inside
 * the xarray) hides the SC from the datapath.
 */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;
	struct mlx5e_macsec_rx_sc *rx_sc;
};

/* Rx security channel with up to MACSEC_NUM_AN associations. */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
	struct list_head rx_sc_list_element;
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries the SCI to the stack on Rx */
	struct rcu_head rcu_head;
};

/* DMA-able scratch area used as the ASO UMR target; the 64-byte
 * alignment matches the hardware requirement for the macsec_aso context.
 */
struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;	/* mapped address of ctx */
	u32 mkey;		/* memory key covering ctx */
};

struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain the UMR mkey belongs to */
};
108 
/* Hashtable parameters: Tx SAs are hashed by SCI so the datapath can
 * resolve SCI -> fs_id with a single lookup.
 */
static const struct rhashtable_params rhash_sci = {
	.key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
	.key_offset = offsetof(struct mlx5e_macsec_sa, sci),
	.head_offset = offsetof(struct mlx5e_macsec_sa, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

/* Per-netdev MACsec offload state (one entry per offloaded SecY device). */
struct mlx5e_macsec_device {
	const struct net_device *netdev;
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
	struct list_head macsec_rx_sc_list_head;	/* list of mlx5e_macsec_rx_sc */
	unsigned char *dev_addr;	/* copy of the device's MAC address */
	struct list_head macsec_device_list_element;
};

/* Top-level MACsec offload context, one per mlx5e private instance. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mlx5e_macsec_fs *macsec_fs;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Tx sci -> fs id mapping handling */
	struct rhashtable sci_hash;      /* sci -> mlx5e_macsec_sa */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* Stats manage */
	struct mlx5e_macsec_stats stats;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;
	struct workqueue_struct *wq;
};

/* Attributes used when creating a firmware MACsec object. */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;
	u32 next_pn;
	__be64 sci;
	u32 enc_key_id;		/* DEK id */
	bool encrypt;
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;		/* read only when epn_state.epn_enabled */
	__be32 ssci;		/* read only when epn_state.epn_enabled */
	bool replay_protect;
	u32 replay_window;
};

/* Parameters of an ASO control WQE segment. */
struct mlx5_aso_ctrl_param {
	u8   data_mask_mode;
	u8   condition_0_operand;
	u8   condition_1_operand;
	u8   condition_0_offset;
	u8   condition_1_offset;
	u8   data_offset;
	u8   condition_operand;
	u32  condition_0_data;
	u32  condition_0_mask;
	u32  condition_1_data;
	u32  condition_1_mask;
	u64  bitwise_data;
	u64  data_mask;
};
177 
178 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
179 {
180 	struct mlx5e_macsec_umr *umr;
181 	struct device *dma_device;
182 	dma_addr_t dma_addr;
183 	int err;
184 
185 	umr = kzalloc(sizeof(*umr), GFP_KERNEL);
186 	if (!umr) {
187 		err = -ENOMEM;
188 		return err;
189 	}
190 
191 	dma_device = mlx5_core_dma_dev(mdev);
192 	dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
193 	err = dma_mapping_error(dma_device, dma_addr);
194 	if (err) {
195 		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
196 		goto out_dma;
197 	}
198 
199 	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
200 	if (err) {
201 		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
202 		goto out_mkey;
203 	}
204 
205 	umr->dma_addr = dma_addr;
206 
207 	aso->umr = umr;
208 
209 	return 0;
210 
211 out_mkey:
212 	dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
213 out_dma:
214 	kfree(umr);
215 	return err;
216 }
217 
218 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
219 {
220 	struct mlx5e_macsec_umr *umr = aso->umr;
221 
222 	mlx5_core_destroy_mkey(mdev, umr->mkey);
223 	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
224 	kfree(umr);
225 }
226 
227 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
228 {
229 	u8 window_sz;
230 
231 	if (!attrs->replay_protect)
232 		return 0;
233 
234 	switch (attrs->replay_window) {
235 	case 256:
236 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
237 		break;
238 	case 128:
239 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
240 		break;
241 	case 64:
242 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
243 		break;
244 	case 32:
245 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
246 		break;
247 	default:
248 		return -EINVAL;
249 	}
250 	MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
251 	MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
252 
253 	return 0;
254 }
255 
/* Create a MACsec offload general object in firmware.
 *
 * @mdev:          device to issue the command on
 * @attrs:         object attributes (key, PN/EPN state, replay settings)
 * @is_tx:         true for a Tx object (PN-increment mode), false for Rx
 * @macsec_obj_id: on success, receives the firmware object id
 *
 * Returns 0 on success or a negative errno from the command interface.
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* NOTE(review): with XPN enabled the SSCI is written into the
		 * object's sci field - confirm against the PRM that this is
		 * the intended layout.
		 */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* The 96-bit salt is laid out as three 32-bit words in
		 * reversed word order.
		 */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		/* Rx objects optionally enable hardware replay protection */
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
318 
/* Destroy a MACsec general object in firmware.  Best-effort: the command
 * status is intentionally ignored since this runs on teardown paths.
 */
static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
330 
/* Tear down the offload state of one SA: unpublish it from the datapath,
 * remove its steering rule and destroy its hardware object.  Safe to
 * call for an SA whose rule was never installed (macsec_rule == NULL).
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa,
				    bool is_tx)
{
	int action =  (is_tx) ?  MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	/* Remove from the SCI hashtable first so concurrent datapath
	 * lookups never see an SA that is mid-teardown.
	 */
	if ((is_tx) && sa->fs_id) {
		/* Make sure ongoing datapath readers sees a valid SA */
		rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
		sa->fs_id = 0;
	}

	if (!sa->macsec_rule)
		return;

	mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
	sa->macsec_rule = NULL;
}
351 
352 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
353 				struct mlx5e_macsec_sa *sa,
354 				bool encrypt,
355 				bool is_tx)
356 {
357 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
358 	struct mlx5e_macsec *macsec = priv->macsec;
359 	struct mlx5_macsec_rule_attrs rule_attrs;
360 	struct mlx5_core_dev *mdev = priv->mdev;
361 	struct mlx5_macsec_obj_attrs obj_attrs;
362 	union mlx5e_macsec_rule *macsec_rule;
363 	int err;
364 
365 	obj_attrs.next_pn = sa->next_pn;
366 	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
367 	obj_attrs.enc_key_id = sa->enc_key_id;
368 	obj_attrs.encrypt = encrypt;
369 	obj_attrs.aso_pdn = macsec->aso.pdn;
370 	obj_attrs.epn_state = sa->epn_state;
371 
372 	if (sa->epn_state.epn_enabled) {
373 		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
374 		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
375 	}
376 
377 	obj_attrs.replay_window = ctx->secy->replay_window;
378 	obj_attrs.replay_protect = ctx->secy->replay_protect;
379 
380 	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
381 	if (err)
382 		return err;
383 
384 	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
385 	rule_attrs.sci = sa->sci;
386 	rule_attrs.assoc_num = sa->assoc_num;
387 	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
388 				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
389 
390 	macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
391 	if (!macsec_rule) {
392 		err = -ENOMEM;
393 		goto destroy_macsec_object;
394 	}
395 
396 	sa->macsec_rule = macsec_rule;
397 
398 	if (is_tx) {
399 		err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
400 		if (err)
401 			goto destroy_macsec_object_and_rule;
402 	}
403 
404 	return 0;
405 
406 destroy_macsec_object_and_rule:
407 	mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
408 destroy_macsec_object:
409 	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
410 
411 	return err;
412 }
413 
414 static struct mlx5e_macsec_rx_sc *
415 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
416 {
417 	struct mlx5e_macsec_rx_sc *iter;
418 
419 	list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
420 		if (iter->sci == sci)
421 			return iter;
422 	}
423 
424 	return NULL;
425 }
426 
427 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
428 				      struct mlx5e_macsec_sa *rx_sa,
429 				      bool active)
430 {
431 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
432 	struct mlx5e_macsec *macsec = priv->macsec;
433 	int err = 0;
434 
435 	if (rx_sa->active == active)
436 		return 0;
437 
438 	rx_sa->active = active;
439 	if (!active) {
440 		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
441 		return 0;
442 	}
443 
444 	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
445 	if (err)
446 		rx_sa->active = false;
447 
448 	return err;
449 }
450 
451 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
452 {
453 	const struct net_device *netdev = ctx->netdev;
454 	const struct macsec_secy *secy = ctx->secy;
455 
456 	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
457 		netdev_err(netdev,
458 			   "MACsec offload is supported only when validate_frame is in strict mode\n");
459 		return false;
460 	}
461 
462 	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
463 		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
464 			   MACSEC_DEFAULT_ICV_LEN);
465 		return false;
466 	}
467 
468 	if (!secy->protect_frames) {
469 		netdev_err(netdev,
470 			   "MACsec offload is supported only when protect_frames is set\n");
471 		return false;
472 	}
473 
474 	if (!ctx->secy->tx_sc.encrypt) {
475 		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
476 		return false;
477 	}
478 
479 	return true;
480 }
481 
482 static struct mlx5e_macsec_device *
483 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
484 				       const struct macsec_context *ctx)
485 {
486 	struct mlx5e_macsec_device *iter;
487 	const struct list_head *list;
488 
489 	list = &macsec->macsec_device_list_head;
490 	list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
491 		if (iter->netdev == ctx->secy->netdev)
492 			return iter;
493 	}
494 
495 	return NULL;
496 }
497 
498 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
499 			      const pn_t *next_pn_halves, ssci_t ssci)
500 {
501 	struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
502 
503 	sa->ssci = ssci;
504 	sa->salt = key->salt;
505 	epn_state->epn_enabled = 1;
506 	epn_state->epn_msb = next_pn_halves->upper;
507 	epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
508 }
509 
510 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
511 {
512 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
513 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
514 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
515 	const struct macsec_secy *secy = ctx->secy;
516 	struct mlx5e_macsec_device *macsec_device;
517 	struct mlx5_core_dev *mdev = priv->mdev;
518 	u8 assoc_num = ctx->sa.assoc_num;
519 	struct mlx5e_macsec_sa *tx_sa;
520 	struct mlx5e_macsec *macsec;
521 	int err = 0;
522 
523 	mutex_lock(&priv->macsec->lock);
524 
525 	macsec = priv->macsec;
526 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
527 	if (!macsec_device) {
528 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
529 		err = -EEXIST;
530 		goto out;
531 	}
532 
533 	if (macsec_device->tx_sa[assoc_num]) {
534 		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
535 		err = -EEXIST;
536 		goto out;
537 	}
538 
539 	tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
540 	if (!tx_sa) {
541 		err = -ENOMEM;
542 		goto out;
543 	}
544 
545 	tx_sa->active = ctx_tx_sa->active;
546 	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
547 	tx_sa->sci = secy->sci;
548 	tx_sa->assoc_num = assoc_num;
549 
550 	if (secy->xpn)
551 		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
552 				  ctx_tx_sa->ssci);
553 
554 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
555 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
556 					 &tx_sa->enc_key_id);
557 	if (err)
558 		goto destroy_sa;
559 
560 	macsec_device->tx_sa[assoc_num] = tx_sa;
561 	if (!secy->operational ||
562 	    assoc_num != tx_sc->encoding_sa ||
563 	    !tx_sa->active)
564 		goto out;
565 
566 	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
567 	if (err)
568 		goto destroy_encryption_key;
569 
570 	mutex_unlock(&macsec->lock);
571 
572 	return 0;
573 
574 destroy_encryption_key:
575 	macsec_device->tx_sa[assoc_num] = NULL;
576 	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
577 destroy_sa:
578 	kfree(tx_sa);
579 out:
580 	mutex_unlock(&macsec->lock);
581 
582 	return err;
583 }
584 
585 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
586 {
587 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
588 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
589 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
590 	struct mlx5e_macsec_device *macsec_device;
591 	u8 assoc_num = ctx->sa.assoc_num;
592 	struct mlx5e_macsec_sa *tx_sa;
593 	struct mlx5e_macsec *macsec;
594 	struct net_device *netdev;
595 	int err = 0;
596 
597 	mutex_lock(&priv->macsec->lock);
598 
599 	macsec = priv->macsec;
600 	netdev = ctx->netdev;
601 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
602 	if (!macsec_device) {
603 		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
604 		err = -EINVAL;
605 		goto out;
606 	}
607 
608 	tx_sa = macsec_device->tx_sa[assoc_num];
609 	if (!tx_sa) {
610 		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
611 		err = -EEXIST;
612 		goto out;
613 	}
614 
615 	if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
616 		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
617 			   assoc_num);
618 		err = -EINVAL;
619 		goto out;
620 	}
621 
622 	if (tx_sa->active == ctx_tx_sa->active)
623 		goto out;
624 
625 	tx_sa->active = ctx_tx_sa->active;
626 	if (tx_sa->assoc_num != tx_sc->encoding_sa)
627 		goto out;
628 
629 	if (ctx_tx_sa->active) {
630 		err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
631 		if (err)
632 			goto out;
633 	} else {
634 		if (!tx_sa->macsec_rule) {
635 			err = -EINVAL;
636 			goto out;
637 		}
638 
639 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
640 	}
641 out:
642 	mutex_unlock(&macsec->lock);
643 
644 	return err;
645 }
646 
647 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
648 {
649 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
650 	struct mlx5e_macsec_device *macsec_device;
651 	u8 assoc_num = ctx->sa.assoc_num;
652 	struct mlx5e_macsec_sa *tx_sa;
653 	struct mlx5e_macsec *macsec;
654 	int err = 0;
655 
656 	mutex_lock(&priv->macsec->lock);
657 	macsec = priv->macsec;
658 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
659 	if (!macsec_device) {
660 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
661 		err = -EINVAL;
662 		goto out;
663 	}
664 
665 	tx_sa = macsec_device->tx_sa[assoc_num];
666 	if (!tx_sa) {
667 		netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
668 		err = -EEXIST;
669 		goto out;
670 	}
671 
672 	mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
673 	mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
674 	kfree_rcu_mightsleep(tx_sa);
675 	macsec_device->tx_sa[assoc_num] = NULL;
676 
677 out:
678 	mutex_unlock(&macsec->lock);
679 
680 	return err;
681 }
682 
683 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
684 {
685 	struct mlx5e_macsec_sa *macsec_sa;
686 	u32 fs_id = 0;
687 
688 	rcu_read_lock();
689 	macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
690 	if (macsec_sa)
691 		fs_id = macsec_sa->fs_id;
692 	rcu_read_unlock();
693 
694 	return fs_id;
695 }
696 
697 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
698 {
699 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
700 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
701 	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
702 	struct mlx5e_macsec_device *macsec_device;
703 	struct mlx5e_macsec_rx_sc *rx_sc;
704 	struct list_head *rx_sc_list;
705 	struct mlx5e_macsec *macsec;
706 	int err = 0;
707 
708 	mutex_lock(&priv->macsec->lock);
709 	macsec = priv->macsec;
710 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
711 	if (!macsec_device) {
712 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
713 		err = -EINVAL;
714 		goto out;
715 	}
716 
717 	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
718 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
719 	if (rx_sc) {
720 		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
721 			   ctx_rx_sc->sci);
722 		err = -EEXIST;
723 		goto out;
724 	}
725 
726 	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
727 	if (!rx_sc) {
728 		err = -ENOMEM;
729 		goto out;
730 	}
731 
732 	sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
733 	if (!sc_xarray_element) {
734 		err = -ENOMEM;
735 		goto destroy_rx_sc;
736 	}
737 
738 	sc_xarray_element->rx_sc = rx_sc;
739 	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
740 		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
741 	if (err) {
742 		if (err == -EBUSY)
743 			netdev_err(ctx->netdev,
744 				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
745 				   MLX5_MACEC_RX_FS_ID_MAX);
746 		goto destroy_sc_xarray_elemenet;
747 	}
748 
749 	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
750 	if (!rx_sc->md_dst) {
751 		err = -ENOMEM;
752 		goto erase_xa_alloc;
753 	}
754 
755 	rx_sc->sci = ctx_rx_sc->sci;
756 	rx_sc->active = ctx_rx_sc->active;
757 	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
758 
759 	rx_sc->sc_xarray_element = sc_xarray_element;
760 	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
761 	mutex_unlock(&macsec->lock);
762 
763 	return 0;
764 
765 erase_xa_alloc:
766 	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
767 destroy_sc_xarray_elemenet:
768 	kfree(sc_xarray_element);
769 destroy_rx_sc:
770 	kfree(rx_sc);
771 
772 out:
773 	mutex_unlock(&macsec->lock);
774 
775 	return err;
776 }
777 
778 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
779 {
780 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
781 	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
782 	struct mlx5e_macsec_device *macsec_device;
783 	struct mlx5e_macsec_rx_sc *rx_sc;
784 	struct mlx5e_macsec_sa *rx_sa;
785 	struct mlx5e_macsec *macsec;
786 	struct list_head *list;
787 	int i;
788 	int err = 0;
789 
790 	mutex_lock(&priv->macsec->lock);
791 
792 	macsec = priv->macsec;
793 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
794 	if (!macsec_device) {
795 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
796 		err = -EINVAL;
797 		goto out;
798 	}
799 
800 	list = &macsec_device->macsec_rx_sc_list_head;
801 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
802 	if (!rx_sc) {
803 		err = -EINVAL;
804 		goto out;
805 	}
806 
807 	if (rx_sc->active == ctx_rx_sc->active)
808 		goto out;
809 
810 	rx_sc->active = ctx_rx_sc->active;
811 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
812 		rx_sa = rx_sc->rx_sa[i];
813 		if (!rx_sa)
814 			continue;
815 
816 		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
817 		if (err)
818 			goto out;
819 	}
820 
821 out:
822 	mutex_unlock(&macsec->lock);
823 
824 	return err;
825 }
826 
/* Free an Rx SC and everything hanging off it: all SAs (rules, objects,
 * keys), the xarray mapping, the metadata dst and finally the SC itself.
 * Caller holds macsec->lock.
 */
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rule already removed at
	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
	 * Rx related data propagating using xa_erase which uses rcu to sync,
	 * once fs_id is erased then this rx_sc is hidden from datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	kfree_rcu_mightsleep(rx_sc);
}
855 
856 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
857 {
858 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
859 	struct mlx5e_macsec_device *macsec_device;
860 	struct mlx5e_macsec_rx_sc *rx_sc;
861 	struct mlx5e_macsec *macsec;
862 	struct list_head *list;
863 	int err = 0;
864 
865 	mutex_lock(&priv->macsec->lock);
866 
867 	macsec = priv->macsec;
868 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
869 	if (!macsec_device) {
870 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
871 		err = -EINVAL;
872 		goto out;
873 	}
874 
875 	list = &macsec_device->macsec_rx_sc_list_head;
876 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
877 	if (!rx_sc) {
878 		netdev_err(ctx->netdev,
879 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
880 			   ctx->sa.rx_sa->sc->sci);
881 		err = -EINVAL;
882 		goto out;
883 	}
884 
885 	macsec_del_rxsc_ctx(macsec, rx_sc);
886 out:
887 	mutex_unlock(&macsec->lock);
888 
889 	return err;
890 }
891 
/* Offload callback: add a new Rx SA on an existing Rx SC.  Always
 * creates the encryption key; the hardware object and steering rule are
 * installed only when the SA is active.
 */
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;
	/* all SAs of an Rx SC share the SC's steering fs_id */
	rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;

	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	/* inactive SAs keep only the key; rules are added on activation */
	if (!rx_sa->active)
		goto out;

	/* TODO: add support for both authentication and encryption flows */
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
977 
978 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
979 {
980 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
981 	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
982 	struct mlx5e_macsec_device *macsec_device;
983 	u8 assoc_num = ctx->sa.assoc_num;
984 	struct mlx5e_macsec_rx_sc *rx_sc;
985 	sci_t sci = ctx_rx_sa->sc->sci;
986 	struct mlx5e_macsec_sa *rx_sa;
987 	struct mlx5e_macsec *macsec;
988 	struct list_head *list;
989 	int err = 0;
990 
991 	mutex_lock(&priv->macsec->lock);
992 
993 	macsec = priv->macsec;
994 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
995 	if (!macsec_device) {
996 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
997 		err = -EINVAL;
998 		goto out;
999 	}
1000 
1001 	list = &macsec_device->macsec_rx_sc_list_head;
1002 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1003 	if (!rx_sc) {
1004 		netdev_err(ctx->netdev,
1005 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
1006 			   ctx->sa.rx_sa->sc->sci);
1007 		err = -EINVAL;
1008 		goto out;
1009 	}
1010 
1011 	rx_sa = rx_sc->rx_sa[assoc_num];
1012 	if (!rx_sa) {
1013 		netdev_err(ctx->netdev,
1014 			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1015 			   sci, assoc_num);
1016 		err = -EINVAL;
1017 		goto out;
1018 	}
1019 
1020 	if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
1021 		netdev_err(ctx->netdev,
1022 			   "MACsec offload update RX sa %d PN isn't supported\n",
1023 			   assoc_num);
1024 		err = -EINVAL;
1025 		goto out;
1026 	}
1027 
1028 	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
1029 out:
1030 	mutex_unlock(&macsec->lock);
1031 
1032 	return err;
1033 }
1034 
/* Offload callback: delete an Rx SA, tearing down its rule, hardware
 * object and encryption key.
 */
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	/* cleanup removes the steering rule and destroys the HW object */
	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1086 
/* Register a new SecY (MACsec net_device) with the offload layer: allocate a
 * per-device context, snapshot its MAC address and link it into the global
 * device list. Bounded by MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES.
 */
static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	const struct net_device *netdev = ctx->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec *macsec;
	int err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	/* NOTE(review): when a context already exists, err remains 0 so the
	 * caller sees success after the error log — confirm this is intended
	 * (vs. returning -EEXIST).
	 */
	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
		goto out;
	}

	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
		err = -EBUSY;
		goto out;
	}

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device) {
		err = -ENOMEM;
		goto out;
	}

	/* keep a private copy of the MACsec netdev MAC; compared later in
	 * mlx5e_macsec_upd_secy() to detect address changes
	 */
	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
	if (!macsec_device->dev_addr) {
		kfree(macsec_device);
		err = -ENOMEM;
		goto out;
	}

	macsec_device->netdev = dev;

	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);

	++macsec->num_of_devices;
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1137 
/* Rebuild the Rx offload state after the SecY netdev MAC address changed:
 * first tear down every existing Rx SA HW context, then re-create contexts
 * for the SAs that are still active (steering will match the new address),
 * and finally record the new address in the device context.
 * Caller must hold macsec->lock.
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	/* pass 1: destroy all currently-offloaded Rx SA contexts */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
		}
	}

	/* pass 2: re-create HW contexts for the active SAs only */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
				if (err)
					goto out;
			}
		}
	}

	/* remember the new address so future upd_secy calls compare correctly */
	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1179 
/* This function is called from two macsec ops functions:
 *  macsec_set_mac_address - the MAC address was changed, so we need to destroy
 *  and create new Tx contexts (macsec object + steering).
 *  macsec_changelink - the Tx SC or SecY may have changed, so the Tx and Rx
 *  contexts (macsec object + steering) need to be destroyed and recreated.
 */
1186 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1187 {
1188 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1189 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1190 	const struct net_device *dev = ctx->secy->netdev;
1191 	struct mlx5e_macsec_device *macsec_device;
1192 	struct mlx5e_macsec_sa *tx_sa;
1193 	struct mlx5e_macsec *macsec;
1194 	int i, err = 0;
1195 
1196 	if (!mlx5e_macsec_secy_features_validate(ctx))
1197 		return -EINVAL;
1198 
1199 	mutex_lock(&priv->macsec->lock);
1200 
1201 	macsec = priv->macsec;
1202 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1203 	if (!macsec_device) {
1204 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1205 		err = -EINVAL;
1206 		goto out;
1207 	}
1208 
1209 	/* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */
1210 	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1211 		err = macsec_upd_secy_hw_address(ctx, macsec_device);
1212 		if (err)
1213 			goto out;
1214 	}
1215 
1216 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1217 		tx_sa = macsec_device->tx_sa[i];
1218 		if (!tx_sa)
1219 			continue;
1220 
1221 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
1222 	}
1223 
1224 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1225 		tx_sa = macsec_device->tx_sa[i];
1226 		if (!tx_sa)
1227 			continue;
1228 
1229 		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1230 			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
1231 			if (err)
1232 				goto out;
1233 		}
1234 	}
1235 
1236 out:
1237 	mutex_unlock(&macsec->lock);
1238 
1239 	return err;
1240 }
1241 
/* Unregister a SecY: destroy every Tx SA (HW context + key), every Rx SC with
 * its SAs, then unlink and free the per-device context.
 */
static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;
	int i;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;

		goto out;
	}

	/* Tx side: HW context, then DEK, then the software SA */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
		kfree(tx_sa);
		macsec_device->tx_sa[i] = NULL;
	}

	/* Rx side: each SC cleanup frees its SAs and xarray entry */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
		macsec_del_rxsc_ctx(macsec, rx_sc);

	kfree(macsec_device->dev_addr);
	macsec_device->dev_addr = NULL;

	list_del_rcu(&macsec_device->macsec_device_list_element);
	--macsec->num_of_devices;
	kfree(macsec_device);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1290 
1291 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1292 				     struct mlx5_macsec_obj_attrs *attrs)
1293 {
1294 	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1295 	attrs->epn_state.overlap = sa->epn_state.overlap;
1296 }
1297 
/* Build an ASO WQE control segment targeting the MACsec UMR buffer.
 * @param is optional: NULL produces a plain context read; otherwise the
 * condition/bitwise fields are packed into the segment per the HW layout.
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	/* DMA address of the UMR context buffer; READ_EN requests that HW
	 * write the ASO context back into it.
	 */
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	/* Sub-byte fields: each byte below packs two fields (shifts per the
	 * mlx5_wqe_aso_ctrl_seg bit layout).
	 */
	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1326 
/* Update the EPN fields (epn_msb, epn_overlap) of an existing FW MACsec
 * object. The object is queried first to verify those fields are modifiable
 * (modify_field_select bitmask); returns -EOPNOTSUPP if they are not.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	/* reuse "in" for the modify command: same header, new object payload */
	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1369 
1370 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1371 				  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1372 				  struct mlx5e_macsec_aso_in *in)
1373 {
1374 	struct mlx5_aso_ctrl_param param = {};
1375 
1376 	param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1377 	param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1378 	param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1379 	if (in->mode == MLX5_MACSEC_EPN) {
1380 		param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1381 		param.bitwise_data = BIT_ULL(54);
1382 		param.data_mask = param.bitwise_data;
1383 	}
1384 	macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
1385 }
1386 
/* Post an ASO WQE that re-arms the event (e.g. EPN) on the MACsec object
 * identified by in->obj_id, and synchronously wait for its completion.
 * aso_lock serializes access to the single shared ASO SQ/UMR buffer.
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}
1409 
/* Read the ASO context of the MACsec object in->obj_id into the UMR buffer
 * and report the EPN arm state and mode parameter via @out.
 * Polls the completion for up to ~10ms; on timeout returns the last poll
 * error without touching @out.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	unsigned long expires;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	/* NULL param: plain read of the object's ASO context */
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		err = mlx5_aso_poll_cq(maso, false);
		if (err)
			usleep_range(2, 10);
	} while (err && time_is_after_jiffies(expires));

	if (err)
		goto err_out;

	/* HW wrote the context back into the UMR buffer (READ_EN) */
	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1449 
1450 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1451 							    const u32 obj_id)
1452 {
1453 	const struct list_head *device_list;
1454 	struct mlx5e_macsec_sa *macsec_sa;
1455 	struct mlx5e_macsec_device *iter;
1456 	int i;
1457 
1458 	device_list = &macsec->macsec_device_list_head;
1459 
1460 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1461 		for (i = 0; i < MACSEC_NUM_AN; ++i) {
1462 			macsec_sa = iter->tx_sa[i];
1463 			if (!macsec_sa || !macsec_sa->active)
1464 				continue;
1465 			if (macsec_sa->macsec_obj_id == obj_id)
1466 				return macsec_sa;
1467 		}
1468 	}
1469 
1470 	return NULL;
1471 }
1472 
1473 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1474 							    const u32 obj_id)
1475 {
1476 	const struct list_head *device_list, *sc_list;
1477 	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1478 	struct mlx5e_macsec_sa *macsec_sa;
1479 	struct mlx5e_macsec_device *iter;
1480 	int i;
1481 
1482 	device_list = &macsec->macsec_device_list_head;
1483 
1484 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1485 		sc_list = &iter->macsec_rx_sc_list_head;
1486 		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1487 			for (i = 0; i < MACSEC_NUM_AN; ++i) {
1488 				macsec_sa = mlx5e_rx_sc->rx_sa[i];
1489 				if (!macsec_sa || !macsec_sa->active)
1490 					continue;
1491 				if (macsec_sa->macsec_obj_id == obj_id)
1492 					return macsec_sa;
1493 			}
1494 		}
1495 	}
1496 
1497 	return NULL;
1498 }
1499 
/* Advance the SA's extended packet number (EPN) window state in SW and push
 * it to the FW object, then re-arm the EPN event so the next window crossing
 * raises another object-change event. Caller must hold macsec->lock.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	/* best-effort: the return value of the FW modify is ignored here */
	macsec_build_accel_attrs(sa, &attrs);
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1529 
/* Workqueue handler for a FW object-change event: locate the SA (Tx first,
 * then Rx) owning the changed object, query its ASO context, and if the EPN
 * arm bit has fired, update the EPN window. Frees the work item on exit.
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* SA may have been deleted between event and work run */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case: a cleared arm bit means the HW consumed the event */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1567 
1568 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1569 {
1570 	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1571 	struct mlx5e_macsec_async_work *async_work;
1572 	struct mlx5_eqe_obj_change *obj_change;
1573 	struct mlx5_eqe *eqe = data;
1574 	u16 obj_type;
1575 	u32 obj_id;
1576 
1577 	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1578 		return NOTIFY_DONE;
1579 
1580 	obj_change = &eqe->data.obj_change;
1581 	obj_type = be16_to_cpu(obj_change->obj_type);
1582 	obj_id = be32_to_cpu(obj_change->obj_id);
1583 
1584 	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1585 		return NOTIFY_DONE;
1586 
1587 	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1588 	if (!async_work)
1589 		return NOTIFY_DONE;
1590 
1591 	async_work->macsec = macsec;
1592 	async_work->mdev = macsec->mdev;
1593 	async_work->obj_id = obj_id;
1594 
1595 	INIT_WORK(&async_work->work, macsec_async_event);
1596 
1597 	WARN_ON(!queue_work(macsec->wq, &async_work->work));
1598 
1599 	return NOTIFY_OK;
1600 }
1601 
/* Set up the MACsec ASO machinery: PD, ASO SQ/CQ, and the UMR buffer used
 * for context read-back. On failure, unwinds in reverse order.
 */
static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	struct mlx5_aso *maso;
	int err;

	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
			      err);
		return err;
	}

	maso = mlx5_aso_create(mdev, aso->pdn);
	if (IS_ERR(maso)) {
		err = PTR_ERR(maso);
		goto err_aso;
	}

	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
	if (err)
		goto err_aso_reg;

	/* serializes all users of the single ASO SQ and UMR buffer */
	mutex_init(&aso->aso_lock);

	aso->maso = maso;

	return 0;

err_aso_reg:
	mlx5_aso_destroy(maso);
err_aso:
	mlx5_core_dealloc_pd(mdev, aso->pdn);
	return err;
}
1637 
1638 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1639 {
1640 	if (!aso)
1641 		return;
1642 
1643 	mlx5e_macsec_aso_dereg_mr(mdev, aso);
1644 
1645 	mlx5_aso_destroy(aso->maso);
1646 
1647 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1648 }
1649 
1650 bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
1651 {
1652 	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
1653 	    MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
1654 		return false;
1655 
1656 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
1657 		return false;
1658 
1659 	if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
1660 		return false;
1661 
1662 	if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
1663 	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
1664 		return false;
1665 
1666 	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
1667 	    !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
1668 		return false;
1669 
1670 	if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
1671 	    !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
1672 		return false;
1673 
1674 	if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
1675 	    !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
1676 		return false;
1677 
1678 	return true;
1679 }
1680 
/* Fill @macsec_stats from the steering counters (thin wrapper over the FS layer). */
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
{
	mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
}
1685 
1686 struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
1687 {
1688 	if (!macsec)
1689 		return NULL;
1690 
1691 	return &macsec->stats;
1692 }
1693 
/* MACsec offload callbacks handed to the core MACsec driver through
 * netdev->macsec_ops (see mlx5e_macsec_build_netdev()).
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
};
1708 
1709 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1710 {
1711 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1712 	u32 fs_id;
1713 
1714 	fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1715 	if (!fs_id)
1716 		goto err_out;
1717 
1718 	return true;
1719 
1720 err_out:
1721 	dev_kfree_skb_any(skb);
1722 	return false;
1723 }
1724 
1725 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1726 				struct sk_buff *skb,
1727 				struct mlx5_wqe_eth_seg *eseg)
1728 {
1729 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1730 	u32 fs_id;
1731 
1732 	fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1733 	if (!fs_id)
1734 		return;
1735 
1736 	eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1737 }
1738 
/* Rx datapath hook: map the CQE's flow-steering metadata back to the Rx SC
 * and attach the SC's metadata dst to the skb so the core MACsec driver can
 * associate the packet with its SecY.
 */
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
					struct sk_buff *skb,
					struct mlx5_cqe64 *cqe)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec *macsec;
	u32  fs_id;

	macsec = priv->macsec;
	if (!macsec)
		return;

	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);

	rcu_read_lock();
	/* NOTE(review): the xa_load() result is dereferenced without a NULL
	 * check — this assumes any fs_id stamped by HW always has a live
	 * xarray entry; verify against the SC teardown path.
	 */
	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
	rx_sc = sc_xarray_element->rx_sc;
	if (rx_sc) {
		/* hold a ref for the skb; released when the skb's dst is dropped */
		dst_hold(&rx_sc->md_dst->dst);
		skb_dst_set(skb, &rx_sc->md_dst->dst);
	}

	rcu_read_unlock();
}
1766 
1767 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1768 {
1769 	struct net_device *netdev = priv->netdev;
1770 
1771 	if (!mlx5e_is_macsec_device(priv->mdev))
1772 		return;
1773 
1774 	/* Enable MACsec */
1775 	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1776 	netdev->macsec_ops = &macsec_offload_ops;
1777 	netdev->features |= NETIF_F_HW_MACSEC;
1778 	netif_keep_dst(netdev);
1779 }
1780 
/* Initialize MACsec offload for this netdev: SCI hash table, ASO machinery,
 * ordered workqueue for async FW events, SC xarray, steering tables, and the
 * FW object-change notifier. Returns 0 (also when the device simply lacks
 * MACsec support) or a negative errno; on failure everything initialized so
 * far is unwound and priv->macsec is left NULL.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5e_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	/* maps Tx SCI -> fs_id for the Tx datapath lookups */
	err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
			      err);
		goto err_hash;
	}

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* ordered: async object-change events for one device must serialize */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	/* ALLOC1: fs_id 0 is reserved as "no flow" in the datapath */
	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	macsec->macsec_fs = macsec_fs;

	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	rhashtable_destroy(&macsec->sci_hash);
err_hash:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}
1851 
/* Tear down MACsec offload state in reverse order of mlx5e_macsec_init():
 * notifier first (stop new events), then steering, workqueue (flushes queued
 * async work), ASO, hash table and the context itself. No-op when offload
 * was never initialized.
 */
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	rhashtable_destroy(&macsec->sci_hash);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}
1868