xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 #include <linux/if_vlan.h>
8 
9 #include "en.h"
10 #include "lib/aso.h"
11 #include "lib/crypto.h"
12 #include "en_accel/macsec.h"
13 
/* Midpoint of the 32-bit lower-PN space; a lower PN at or above this value
 * marks the EPN window as overlapping (see update_macsec_epn()).
 */
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
/* Size in bytes of the device macsec ASO context. */
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)

/* Bit flags describing which ASO events are armed for a MACsec object. */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),	/* EPN (extended packet number) event */
};

enum {
	/* Offset of the "remove flow" packet counter in the ASO data. */
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

/* Identifies one hardware MACsec object (and its AN index) for processing. */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;	/* owning MACsec offload context */
	u32 obj_id;			/* hardware MACsec object id */
	u8 idx;				/* association number index */
};

enum {
	MLX5_MACSEC_EPN,	/* ASO operation mode: EPN handling */
};

/* Result of an ASO query on a MACsec object. */
struct mlx5e_macsec_aso_out {
	u8 event_arm;	/* armed events (enum mlx5_macsec_aso_event_arm) */
	u32 mode_param;	/* mode parameter read back from the device */
};

/* Input for an ASO modify operation on a MACsec object. */
struct mlx5e_macsec_aso_in {
	u8 mode;	/* ASO mode to program */
	u32 obj_id;	/* target hardware MACsec object id */
};

/* Software shadow of a SA's extended packet number (EPN) state. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;	/* upper 32 bits of the 64-bit packet number */
	u8 epn_enabled;	/* non-zero when the SA uses XPN */
	u8 overlap;	/* set when lower PN >= MLX5_MACSEC_EPN_SCOPE_MID */
};

/* Deferred-work wrapper for handling a device event outside IRQ context. */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;	/* hardware object the event refers to */
};
57 
/* Software state for a single MACsec security association (SA),
 * either Tx or Rx.
 */
struct mlx5e_macsec_sa {
	bool active;		/* SA is active; implies the HW object exists */
	u8  assoc_num;		/* association number (AN) of this SA */
	u32 macsec_obj_id;	/* hardware MACsec object backing this SA */
	u32 enc_key_id;		/* id of the HW encryption key (DEK) object */
	u32 next_pn;		/* lower 32 bits of the next packet number */
	sci_t sci;
	ssci_t ssci;		/* short SCI; meaningful only for XPN SAs */
	salt_t salt;		/* salt; meaningful only for XPN SAs */

	union mlx5_macsec_rule *macsec_rule;	/* steering rule, NULL when none */
	struct rcu_head rcu_head;
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
/* Entry published in the fs_id -> rx_sc xarray; fs_id is the steering id
 * carried in packet metadata on Rx.
 */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;
	struct mlx5e_macsec_rx_sc *rx_sc;
};

/* Software state for a MACsec Rx security channel (SC). */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];	/* Rx SAs indexed by AN */
	struct list_head rx_sc_list_element;
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries this SC's SCI to the stack */
	struct rcu_head rcu_head;
};

/* DMA-mapped buffer plus mkey used for ASO UMR access to the macsec_aso
 * context.
 */
struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;	/* DMA address of ctx */
	u32 mkey;		/* memory key covering ctx */
};
94 
/* ASO machinery used to query/modify MACsec objects after creation. */
struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain for ASO/UMR memory access */
};

/* Per-netdev MACsec offload context; one per offloaded macsec device. */
struct mlx5e_macsec_device {
	const struct net_device *netdev;
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];	/* Tx SAs indexed by AN */
	struct list_head macsec_rx_sc_list_head;	/* Rx SCs of this device */
	unsigned char *dev_addr;
	struct list_head macsec_device_list_element;
};

/* Top-level MACsec offload state for one mlx5 core device. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	/* NOTE(review): nb/wq look like they serve async device-event
	 * handling (cf. struct mlx5e_macsec_async_work) — registration is
	 * outside this chunk, confirm against the init path.
	 */
	struct notifier_block nb;
	struct workqueue_struct *wq;
};

/* Attributes used to build the hardware MACsec offload object. */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;
	u32 next_pn;	/* lower 32 bits of the next packet number */
	__be64 sci;
	u32 enc_key_id;	/* HW encryption key (DEK) id */
	bool encrypt;
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;	/* read only when epn_state.epn_enabled */
	__be32 ssci;	/* read only when epn_state.epn_enabled */
	bool replay_protect;
	u32 replay_window;	/* window size; 32/64/128/256 are accepted */
};

/* Parameters describing an ASO control segment. */
struct mlx5_aso_ctrl_param {
	u8   data_mask_mode;
	u8   condition_0_operand;
	u8   condition_1_operand;
	u8   condition_0_offset;
	u8   condition_1_offset;
	u8   data_offset;
	u8   condition_operand;
	u32  condition_0_data;
	u32  condition_0_mask;
	u32  condition_1_data;
	u32  condition_1_mask;
	u64  bitwise_data;
	u64  data_mask;
};
159 
mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)160 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
161 {
162 	struct mlx5e_macsec_umr *umr;
163 	struct device *dma_device;
164 	dma_addr_t dma_addr;
165 	int err;
166 
167 	umr = kzalloc(sizeof(*umr), GFP_KERNEL);
168 	if (!umr) {
169 		err = -ENOMEM;
170 		return err;
171 	}
172 
173 	dma_device = mlx5_core_dma_dev(mdev);
174 	dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
175 	err = dma_mapping_error(dma_device, dma_addr);
176 	if (err) {
177 		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
178 		goto out_dma;
179 	}
180 
181 	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
182 	if (err) {
183 		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
184 		goto out_mkey;
185 	}
186 
187 	umr->dma_addr = dma_addr;
188 
189 	aso->umr = umr;
190 
191 	return 0;
192 
193 out_mkey:
194 	dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
195 out_dma:
196 	kfree(umr);
197 	return err;
198 }
199 
mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)200 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
201 {
202 	struct mlx5e_macsec_umr *umr = aso->umr;
203 
204 	mlx5_core_destroy_mkey(mdev, umr->mkey);
205 	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
206 	kfree(umr);
207 }
208 
macsec_set_replay_protection(struct mlx5_macsec_obj_attrs * attrs,void * aso_ctx)209 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
210 {
211 	u8 window_sz;
212 
213 	if (!attrs->replay_protect)
214 		return 0;
215 
216 	switch (attrs->replay_window) {
217 	case 256:
218 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
219 		break;
220 	case 128:
221 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
222 		break;
223 	case 64:
224 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
225 		break;
226 	case 32:
227 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
228 		break;
229 	default:
230 		return -EINVAL;
231 	}
232 	MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
233 	MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
234 
235 	return 0;
236 }
237 
/* Create a hardware MACsec offload object from @attrs.
 * @is_tx selects the ASO mode: Tx objects auto-increment the SN, Rx objects
 * optionally get replay protection. On success the new object id is stored
 * in *@macsec_obj_id. Returns 0 or a negative errno.
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	/* mode_parameter carries the (lower 32 bits of the) next PN. */
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* With XPN the sci field carries the 32-bit SSCI instead. */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* Copy the 12-byte salt in reversed 32-bit word order
		 * (word 2 first) — the device expects this layout.
		 */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		/* Tx: device increments the SN per transmitted packet. */
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
300 
/* Destroy a hardware MACsec object previously created by
 * mlx5e_macsec_create_object(). Failures are ignored (teardown path).
 */
static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	void *hdr = in;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, macsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
312 
/* Remove the steering rule of @sa, if one is installed, and clear the
 * pointer. Safe to call on an SA without a rule.
 */
static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
				       struct mlx5e_macsec_sa *sa, bool is_tx,
				       struct net_device *netdev, u32 fs_id)
{
	int action;

	if (!sa->macsec_rule)
		return;

	action = is_tx ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
			 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
	mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action,
				netdev, fs_id);
	sa->macsec_rule = NULL;
}
327 
/* Full SA teardown: remove the steering rule first (stops traffic), then
 * destroy the backing hardware MACsec object.
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa, bool is_tx,
				    struct net_device *netdev, u32 fs_id)
{
	mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
}
335 
/* Install the steering rule for @sa. For Tx, only the encoding SA gets a
 * rule (others are a silent no-op). On success the rule is stored in
 * sa->macsec_rule. Returns 0 or -ENOMEM.
 */
static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
				   struct mlx5e_macsec_sa *sa, bool encrypt,
				   bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	struct mlx5_macsec_rule_attrs rule_attrs;
	union mlx5_macsec_rule *rule;

	/* Tx traffic flows only through the encoding SA. */
	if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
		return 0;

	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
	rule_attrs.sci = sa->sci;
	rule_attrs.assoc_num = sa->assoc_num;
	rule_attrs.action = is_tx ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				    MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	rule = mlx5_macsec_fs_add_rule(priv->mdev->macsec_fs, ctx, &rule_attrs, fs_id);
	if (!rule)
		return -ENOMEM;

	sa->macsec_rule = rule;

	return 0;
}
363 
/* Create the hardware object for @sa and, if the SA is active, install its
 * steering rule. On rule failure the object is destroyed again.
 * Returns 0 or a negative errno.
 */
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
				struct mlx5e_macsec_sa *sa,
				bool encrypt, bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;
	/* NOTE: obj_attrs is deliberately not zeroed; ssci/salt stay
	 * uninitialized in the non-EPN case and are never read then.
	 */
	struct mlx5_macsec_obj_attrs obj_attrs;
	int err;

	obj_attrs.next_pn = sa->next_pn;
	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
	obj_attrs.enc_key_id = sa->enc_key_id;
	obj_attrs.encrypt = encrypt;
	obj_attrs.aso_pdn = macsec->aso.pdn;
	obj_attrs.epn_state = sa->epn_state;

	if (sa->epn_state.epn_enabled) {
		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
	}

	obj_attrs.replay_window = ctx->secy->replay_window;
	obj_attrs.replay_protect = ctx->secy->replay_protect;

	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
	if (err)
		return err;

	/* Inactive SAs get an object but no steering rule yet; the rule is
	 * added when the SA is activated (see macsec_rx_sa_active_update()).
	 */
	if (sa->active) {
		err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
		if (err)
			goto destroy_macsec_object;
	}

	return 0;

destroy_macsec_object:
	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);

	return err;
}
406 
407 static struct mlx5e_macsec_rx_sc *
mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head * list,sci_t sci)408 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
409 {
410 	struct mlx5e_macsec_rx_sc *iter;
411 
412 	list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
413 		if (iter->sci == sci)
414 			return iter;
415 	}
416 
417 	return NULL;
418 }
419 
/* Transition an Rx SA between active and inactive. Activation creates the
 * HW object and steering rule; deactivation tears both down. No-op when the
 * state already matches. Returns 0 or a negative errno (activation only).
 */
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
				      struct mlx5e_macsec_sa *rx_sa,
				      bool active, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	int err;

	if (rx_sa->active == active)
		return 0;

	if (active) {
		rx_sa->active = true;
		err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
		if (err)
			rx_sa->active = false;	/* roll back on failure */
		return err;
	}

	rx_sa->active = false;
	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);

	return 0;
}
443 
mlx5e_macsec_secy_features_validate(struct macsec_context * ctx)444 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
445 {
446 	const struct net_device *netdev = ctx->netdev;
447 	const struct macsec_secy *secy = ctx->secy;
448 
449 	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
450 		netdev_err(netdev,
451 			   "MACsec offload is supported only when validate_frame is in strict mode\n");
452 		return false;
453 	}
454 
455 	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
456 		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
457 			   MACSEC_DEFAULT_ICV_LEN);
458 		return false;
459 	}
460 
461 	if (!secy->protect_frames) {
462 		netdev_err(netdev,
463 			   "MACsec offload is supported only when protect_frames is set\n");
464 		return false;
465 	}
466 
467 	if (!ctx->secy->tx_sc.encrypt) {
468 		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
469 		return false;
470 	}
471 
472 	return true;
473 }
474 
475 static struct mlx5e_macsec_device *
mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec * macsec,const struct macsec_context * ctx)476 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
477 				       const struct macsec_context *ctx)
478 {
479 	struct mlx5e_macsec_device *iter;
480 	const struct list_head *list;
481 
482 	list = &macsec->macsec_device_list_head;
483 	list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
484 		if (iter->netdev == ctx->secy->netdev)
485 			return iter;
486 	}
487 
488 	return NULL;
489 }
490 
/* Record XPN state on @sa: SSCI, salt, upper PN half and whether the lower
 * PN half sits in the upper part of its range (window overlap).
 */
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves, ssci_t ssci)
{
	struct mlx5e_macsec_epn_state *epn = &sa->epn_state;

	sa->ssci = ssci;
	sa->salt = key->salt;
	epn->epn_enabled = 1;
	epn->epn_msb = next_pn_halves->upper;
	epn->overlap = next_pn_halves->lower >= MLX5_MACSEC_EPN_SCOPE_MID ? 1 : 0;
}
502 
mlx5e_macsec_add_txsa(struct macsec_context * ctx)503 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
504 {
505 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
506 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
507 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
508 	const struct macsec_secy *secy = ctx->secy;
509 	struct mlx5e_macsec_device *macsec_device;
510 	struct mlx5_core_dev *mdev = priv->mdev;
511 	u8 assoc_num = ctx->sa.assoc_num;
512 	struct mlx5e_macsec_sa *tx_sa;
513 	struct mlx5e_macsec *macsec;
514 	int err = 0;
515 
516 	mutex_lock(&priv->macsec->lock);
517 
518 	macsec = priv->macsec;
519 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
520 	if (!macsec_device) {
521 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
522 		err = -EEXIST;
523 		goto out;
524 	}
525 
526 	if (macsec_device->tx_sa[assoc_num]) {
527 		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
528 		err = -EEXIST;
529 		goto out;
530 	}
531 
532 	tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
533 	if (!tx_sa) {
534 		err = -ENOMEM;
535 		goto out;
536 	}
537 
538 	tx_sa->active = ctx_tx_sa->active;
539 	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
540 	tx_sa->sci = secy->sci;
541 	tx_sa->assoc_num = assoc_num;
542 
543 	if (secy->xpn)
544 		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
545 				  ctx_tx_sa->ssci);
546 
547 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
548 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
549 					 &tx_sa->enc_key_id);
550 	if (err)
551 		goto destroy_sa;
552 
553 	macsec_device->tx_sa[assoc_num] = tx_sa;
554 	if (!secy->operational)
555 		goto out;
556 
557 	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
558 	if (err)
559 		goto destroy_encryption_key;
560 
561 	mutex_unlock(&macsec->lock);
562 
563 	return 0;
564 
565 destroy_encryption_key:
566 	macsec_device->tx_sa[assoc_num] = NULL;
567 	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
568 destroy_sa:
569 	kfree(tx_sa);
570 out:
571 	mutex_unlock(&macsec->lock);
572 
573 	return err;
574 }
575 
mlx5e_macsec_upd_txsa(struct macsec_context * ctx)576 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
577 {
578 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
579 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
580 	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
581 	struct mlx5e_macsec_device *macsec_device;
582 	u8 assoc_num = ctx->sa.assoc_num;
583 	struct mlx5e_macsec_sa *tx_sa;
584 	struct mlx5e_macsec *macsec;
585 	struct net_device *netdev;
586 	int err = 0;
587 
588 	mutex_lock(&priv->macsec->lock);
589 
590 	macsec = priv->macsec;
591 	netdev = ctx->netdev;
592 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
593 	if (!macsec_device) {
594 		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
595 		err = -EINVAL;
596 		goto out;
597 	}
598 
599 	tx_sa = macsec_device->tx_sa[assoc_num];
600 	if (!tx_sa) {
601 		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
602 		err = -EEXIST;
603 		goto out;
604 	}
605 
606 	if (ctx->sa.update_pn) {
607 		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
608 			   assoc_num);
609 		err = -EINVAL;
610 		goto out;
611 	}
612 
613 	if (tx_sa->active == ctx_tx_sa->active)
614 		goto out;
615 
616 	tx_sa->active = ctx_tx_sa->active;
617 	if (tx_sa->assoc_num != tx_sc->encoding_sa)
618 		goto out;
619 
620 	if (ctx_tx_sa->active) {
621 		err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
622 		if (err)
623 			goto out;
624 	} else {
625 		if (!tx_sa->macsec_rule) {
626 			err = -EINVAL;
627 			goto out;
628 		}
629 
630 		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
631 	}
632 out:
633 	mutex_unlock(&macsec->lock);
634 
635 	return err;
636 }
637 
mlx5e_macsec_del_txsa(struct macsec_context * ctx)638 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
639 {
640 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
641 	struct mlx5e_macsec_device *macsec_device;
642 	u8 assoc_num = ctx->sa.assoc_num;
643 	struct mlx5e_macsec_sa *tx_sa;
644 	struct mlx5e_macsec *macsec;
645 	int err = 0;
646 
647 	mutex_lock(&priv->macsec->lock);
648 	macsec = priv->macsec;
649 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
650 	if (!macsec_device) {
651 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
652 		err = -EINVAL;
653 		goto out;
654 	}
655 
656 	tx_sa = macsec_device->tx_sa[assoc_num];
657 	if (!tx_sa) {
658 		netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
659 		err = -EEXIST;
660 		goto out;
661 	}
662 
663 	mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
664 	mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
665 	kfree_rcu_mightsleep(tx_sa);
666 	macsec_device->tx_sa[assoc_num] = NULL;
667 
668 out:
669 	mutex_unlock(&macsec->lock);
670 
671 	return err;
672 }
673 
/* macsec_ops: add an Rx SC. Allocates the software SC, reserves an fs_id in
 * the sc_xarray (used by the datapath to map packet metadata back to the
 * SC), allocates the metadata dst that carries the SCI to the stack, and
 * publishes the SC on the device's Rx SC list. Returns 0 or a negative
 * errno.
 */
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct list_head *rx_sc_list;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
	if (rx_sc) {
		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
			   ctx_rx_sc->sci);
		err = -EEXIST;
		goto out;
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc) {
		err = -ENOMEM;
		goto out;
	}

	sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
	if (!sc_xarray_element) {
		err = -ENOMEM;
		goto destroy_rx_sc;
	}

	/* Reserve an fs_id; id 0 is excluded, the range is bounded by
	 * MLX5_MACEC_RX_FS_ID_MAX.
	 */
	sc_xarray_element->rx_sc = rx_sc;
	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
	if (err) {
		if (err == -EBUSY)
			netdev_err(ctx->netdev,
				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
				   MLX5_MACEC_RX_FS_ID_MAX);
		goto destroy_sc_xarray_elemenet;
	}

	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!rx_sc->md_dst) {
		err = -ENOMEM;
		goto erase_xa_alloc;
	}

	rx_sc->sci = ctx_rx_sc->sci;
	rx_sc->active = ctx_rx_sc->active;
	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);

	/* NOTE(review): fields below are filled after publication to the
	 * xarray/list; presumably safe because no steering rule maps
	 * traffic to this fs_id until an Rx SA is added — confirm.
	 */
	rx_sc->sc_xarray_element = sc_xarray_element;
	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
	mutex_unlock(&macsec->lock);

	return 0;

erase_xa_alloc:
	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
destroy_sc_xarray_elemenet:
	kfree(sc_xarray_element);
destroy_rx_sc:
	kfree(rx_sc);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
754 
mlx5e_macsec_upd_rxsc(struct macsec_context * ctx)755 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
756 {
757 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
758 	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
759 	struct mlx5e_macsec_device *macsec_device;
760 	struct mlx5e_macsec_rx_sc *rx_sc;
761 	struct mlx5e_macsec_sa *rx_sa;
762 	struct mlx5e_macsec *macsec;
763 	struct list_head *list;
764 	int i;
765 	int err = 0;
766 
767 	mutex_lock(&priv->macsec->lock);
768 
769 	macsec = priv->macsec;
770 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
771 	if (!macsec_device) {
772 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
773 		err = -EINVAL;
774 		goto out;
775 	}
776 
777 	list = &macsec_device->macsec_rx_sc_list_head;
778 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
779 	if (!rx_sc) {
780 		err = -EINVAL;
781 		goto out;
782 	}
783 
784 	if (rx_sc->active == ctx_rx_sc->active)
785 		goto out;
786 
787 	rx_sc->active = ctx_rx_sc->active;
788 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
789 		rx_sa = rx_sc->rx_sa[i];
790 		if (!rx_sa)
791 			continue;
792 
793 		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
794 						 &rx_sc->sc_xarray_element->fs_id);
795 		if (err)
796 			goto out;
797 	}
798 
799 out:
800 	mutex_unlock(&macsec->lock);
801 
802 	return err;
803 }
804 
/* Tear down an Rx SC: destroy every Rx SA (rule, HW object, key), then
 * unpublish the SC from datapath structures and free it.
 */
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
				struct net_device *netdev)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
					rx_sc->sc_xarray_element->fs_id);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rule already removed at
	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
	 * Rx related data propagating using xa_erase which uses rcu to sync,
	 * once fs_id is erased then this rx_sc is hidden from datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	/* The SC itself is freed after a grace period in case a reader still
	 * holds a reference obtained before the xa_erase above.
	 */
	kfree_rcu_mightsleep(rx_sc);
}
835 
mlx5e_macsec_del_rxsc(struct macsec_context * ctx)836 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
837 {
838 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
839 	struct mlx5e_macsec_device *macsec_device;
840 	struct mlx5e_macsec_rx_sc *rx_sc;
841 	struct mlx5e_macsec *macsec;
842 	struct list_head *list;
843 	int err = 0;
844 
845 	mutex_lock(&priv->macsec->lock);
846 
847 	macsec = priv->macsec;
848 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
849 	if (!macsec_device) {
850 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
851 		err = -EINVAL;
852 		goto out;
853 	}
854 
855 	list = &macsec_device->macsec_rx_sc_list_head;
856 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
857 	if (!rx_sc) {
858 		netdev_err(ctx->netdev,
859 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
860 			   ctx->sa.rx_sa->sc->sci);
861 		err = -EINVAL;
862 		goto out;
863 	}
864 
865 	macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
866 out:
867 	mutex_unlock(&macsec->lock);
868 
869 	return err;
870 }
871 
/* macsec_ops: add an Rx SA to an existing Rx SC. Allocates the software SA,
 * creates the HW encryption key and — when the SA is active — the HW object
 * and steering rule. Returns 0 or a negative errno.
 */
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;

	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	/* Inactive SAs keep only the key; the HW object and rule are created
	 * when the SA is activated (macsec_rx_sa_active_update()).
	 */
	if (!rx_sa->active)
		goto out;

	//TODO - add support for both authentication and encryption flows
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
956 
/* macsec_ops: update an Rx SA. Only the active flag may change (PN updates
 * are rejected); the state change is applied via
 * macsec_rx_sa_active_update(). Returns 0 or a negative errno.
 */
static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (ctx->sa.update_pn) {
		netdev_err(ctx->netdev,
			   "MACsec offload update RX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
					 &rx_sc->sc_xarray_element->fs_id);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1014 
/* macsec_ops: delete an Rx SA. Tears down rule + HW object (only if the SA
 * is active — inactive SAs have neither), destroys the key and frees the
 * software SA. Returns 0 or a negative errno.
 */
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	/* Inactive SAs never created (or already destroyed) their HW
	 * object/rule, so only active SAs need cleanup here.
	 */
	if (rx_sa->active)
		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
					rx_sc->sc_xarray_element->fs_id);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1068 
mlx5e_macsec_add_secy(struct macsec_context * ctx)1069 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1070 {
1071 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1072 	const struct net_device *dev = ctx->secy->netdev;
1073 	const struct net_device *netdev = ctx->netdev;
1074 	struct mlx5e_macsec_device *macsec_device;
1075 	struct mlx5e_macsec *macsec;
1076 	int err = 0;
1077 
1078 	if (!mlx5e_macsec_secy_features_validate(ctx))
1079 		return -EINVAL;
1080 
1081 	mutex_lock(&priv->macsec->lock);
1082 	macsec = priv->macsec;
1083 	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1084 		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1085 		goto out;
1086 	}
1087 
1088 	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1089 		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1090 			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1091 		err = -EBUSY;
1092 		goto out;
1093 	}
1094 
1095 	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
1096 	if (!macsec_device) {
1097 		err = -ENOMEM;
1098 		goto out;
1099 	}
1100 
1101 	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1102 	if (!macsec_device->dev_addr) {
1103 		kfree(macsec_device);
1104 		err = -ENOMEM;
1105 		goto out;
1106 	}
1107 
1108 	macsec_device->netdev = dev;
1109 
1110 	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1111 	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1112 
1113 	++macsec->num_of_devices;
1114 out:
1115 	mutex_unlock(&macsec->lock);
1116 
1117 	return err;
1118 }
1119 
/* Rebuild the Rx offload steering state of a device and sync its cached
 * MAC address.
 *
 * Pass 1 removes the steering rule of every Rx SA that still has one;
 * pass 2 re-installs rules for the SAs that are marked active. Finally the
 * cached dev_addr is refreshed from the SecY's netdev (a no-op when the
 * address did not change).
 *
 * Called with macsec->lock held (from mlx5e_macsec_upd_secy()). Returns 0
 * on success or the mlx5e_macsec_init_sa_fs() error; on failure the cached
 * dev_addr is left untouched and some rules may remain uninstalled.
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	list = &macsec_device->macsec_rx_sc_list_head;
	/* Pass 1: drop the steering rules of all installed Rx SAs. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
						   rx_sc->sc_xarray_element->fs_id);
		}
	}

	/* Pass 2: re-install rules only for the SAs that are still active. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
							      &rx_sc->sc_xarray_element->fs_id);
				if (err)
					goto out;
			}
		}
	}

	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1163 
/* This function is called from two macsec ops functions:
 *  macsec_set_mac_address – the MAC address was changed, therefore we need to
 *  destroy and create new Tx contexts (macsec object + steering).
 *  macsec_changelink – the Tx SC or SecY may have changed, therefore we need
 *  to destroy the Tx and Rx contexts (macsec object + steering).
 */
/* Handle a SecY update: rebuild the Tx offload state, and also the Rx
 * state when the update came from macsec_changelink (see the comment
 * above for the two call sites). Returns 0 on success or a negative
 * errno.
 */
static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int i, err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	/* If the dev_addr hasn't changed, the callback came from
	 * macsec_changelink, so the Rx contexts must be rebuilt as well.
	 */
	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
		err = macsec_upd_secy_hw_address(ctx, macsec_device);
		if (err)
			goto out;
	}

	/* Drop the steering rules of all Tx SAs ... */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
	}

	/* ... then re-install a rule only for the active encoding SA. */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
			err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
			if (err)
				goto out;
		}
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1225 
mlx5e_macsec_del_secy(struct macsec_context * ctx)1226 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1227 {
1228 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1229 	struct mlx5e_macsec_device *macsec_device;
1230 	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1231 	struct mlx5e_macsec_sa *tx_sa;
1232 	struct mlx5e_macsec *macsec;
1233 	struct list_head *list;
1234 	int err = 0;
1235 	int i;
1236 
1237 	mutex_lock(&priv->macsec->lock);
1238 	macsec = priv->macsec;
1239 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1240 	if (!macsec_device) {
1241 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1242 		err = -EINVAL;
1243 
1244 		goto out;
1245 	}
1246 
1247 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1248 		tx_sa = macsec_device->tx_sa[i];
1249 		if (!tx_sa)
1250 			continue;
1251 
1252 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
1253 		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1254 		kfree(tx_sa);
1255 		macsec_device->tx_sa[i] = NULL;
1256 	}
1257 
1258 	list = &macsec_device->macsec_rx_sc_list_head;
1259 	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1260 		macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
1261 
1262 	kfree(macsec_device->dev_addr);
1263 	macsec_device->dev_addr = NULL;
1264 
1265 	list_del_rcu(&macsec_device->macsec_device_list_element);
1266 	--macsec->num_of_devices;
1267 	kfree(macsec_device);
1268 
1269 out:
1270 	mutex_unlock(&macsec->lock);
1271 
1272 	return err;
1273 }
1274 
macsec_build_accel_attrs(struct mlx5e_macsec_sa * sa,struct mlx5_macsec_obj_attrs * attrs)1275 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1276 				     struct mlx5_macsec_obj_attrs *attrs)
1277 {
1278 	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1279 	attrs->epn_state.overlap = sa->epn_state.overlap;
1280 }
1281 
/* Fill a MACsec ASO WQE control segment.
 *
 * Always points the segment at the UMR buffer so the device writes the ASO
 * context back to host memory (ASO_CTRL_READ_EN). When @param is non-NULL,
 * the condition/bitwise operation fields are packed in as well; callers
 * pass NULL for a plain read-back (query).
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	/* DMA address split into low/high 32-bit halves; the low half also
	 * carries the read-enable flag.
	 */
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	/* The following bytes pack two sub-byte hardware fields each. */
	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1310 
/* Update the EPN fields (epn_msb, epn_overlap) of a MACsec offload object.
 *
 * The object is queried first to read modify_field_select: if the firmware
 * does not allow modifying both EPN fields, -EOPNOTSUPP is returned.
 * Otherwise a MODIFY_GENERAL_OBJECT command with the new EPN values is
 * issued. The query-sized out buffer is reused for the modify command.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1353 
macsec_aso_build_ctrl(struct mlx5e_macsec_aso * aso,struct mlx5_wqe_aso_ctrl_seg * aso_ctrl,struct mlx5e_macsec_aso_in * in)1354 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1355 				  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1356 				  struct mlx5e_macsec_aso_in *in)
1357 {
1358 	struct mlx5_aso_ctrl_param param = {};
1359 
1360 	param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1361 	param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1362 	param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1363 	if (in->mode == MLX5_MACSEC_EPN) {
1364 		param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1365 		param.bitwise_data = BIT_ULL(54);
1366 		param.data_mask = param.bitwise_data;
1367 	}
1368 	macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
1369 }
1370 
macsec_aso_set_arm_event(struct mlx5_core_dev * mdev,struct mlx5e_macsec * macsec,struct mlx5e_macsec_aso_in * in)1371 static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1372 				    struct mlx5e_macsec_aso_in *in)
1373 {
1374 	struct mlx5e_macsec_aso *aso;
1375 	struct mlx5_aso_wqe *aso_wqe;
1376 	struct mlx5_aso *maso;
1377 	int err;
1378 
1379 	aso = &macsec->aso;
1380 	maso = aso->maso;
1381 
1382 	mutex_lock(&aso->aso_lock);
1383 	aso_wqe = mlx5_aso_get_wqe(maso);
1384 	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1385 			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
1386 	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
1387 	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
1388 	err = mlx5_aso_poll_cq(maso, false);
1389 	mutex_unlock(&aso->aso_lock);
1390 
1391 	return err;
1392 }
1393 
/* Read back the ASO context of a MACsec object.
 *
 * Posts a plain read WQE (no condition/bitwise parameters) and polls the
 * CQ for up to 10ms, sleeping briefly between attempts. On success @out
 * reports whether the EPN event is still armed and the current
 * mode_parameter. Serialized by aso->aso_lock. Returns 0 on success or the
 * last mlx5_aso_poll_cq() error on timeout.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	unsigned long expires;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	/* NULL param: read the ASO context back into the UMR buffer only. */
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		err = mlx5_aso_poll_cq(maso, false);
		if (err)
			usleep_range(2, 10);
	} while (err && time_is_after_jiffies(expires));

	if (err)
		goto err_out;

	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1433 
get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1434 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1435 							    const u32 obj_id)
1436 {
1437 	const struct list_head *device_list;
1438 	struct mlx5e_macsec_sa *macsec_sa;
1439 	struct mlx5e_macsec_device *iter;
1440 	int i;
1441 
1442 	device_list = &macsec->macsec_device_list_head;
1443 
1444 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1445 		for (i = 0; i < MACSEC_NUM_AN; ++i) {
1446 			macsec_sa = iter->tx_sa[i];
1447 			if (!macsec_sa || !macsec_sa->active)
1448 				continue;
1449 			if (macsec_sa->macsec_obj_id == obj_id)
1450 				return macsec_sa;
1451 		}
1452 	}
1453 
1454 	return NULL;
1455 }
1456 
get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1457 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1458 							    const u32 obj_id)
1459 {
1460 	const struct list_head *device_list, *sc_list;
1461 	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1462 	struct mlx5e_macsec_sa *macsec_sa;
1463 	struct mlx5e_macsec_device *iter;
1464 	int i;
1465 
1466 	device_list = &macsec->macsec_device_list_head;
1467 
1468 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1469 		sc_list = &iter->macsec_rx_sc_list_head;
1470 		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1471 			for (i = 0; i < MACSEC_NUM_AN; ++i) {
1472 				macsec_sa = mlx5e_rx_sc->rx_sa[i];
1473 				if (!macsec_sa || !macsec_sa->active)
1474 					continue;
1475 				if (macsec_sa->macsec_obj_id == obj_id)
1476 					return macsec_sa;
1477 			}
1478 		}
1479 	}
1480 
1481 	return NULL;
1482 }
1483 
/* Advance an SA's EPN (extended packet number) state after the device
 * signalled an EPN event, push the new state to the MACsec object and
 * re-arm the event.
 *
 * Called with macsec->lock held (from macsec_async_event()).
 * NOTE(review): errors from mlx5e_macsec_modify_obj() and
 * macsec_aso_set_arm_event() are silently ignored here.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	macsec_build_accel_attrs(sa, &attrs);
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1513 
/* Workqueue handler for a MACsec object-change event.
 *
 * Looks up the SA that owns the reported object id (Tx SAs first, then Rx
 * SAs), queries its ASO context, and — if the SA uses EPN and the EPN arm
 * flag is no longer set in the queried context — advances the EPN state.
 * Always frees the work item allocated by macsec_obj_change_event().
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	/* Serializes against SA add/update/delete from the macsec ops. */
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* SA may have been deleted before the work ran. */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1551 
macsec_obj_change_event(struct notifier_block * nb,unsigned long event,void * data)1552 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1553 {
1554 	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1555 	struct mlx5e_macsec_async_work *async_work;
1556 	struct mlx5_eqe_obj_change *obj_change;
1557 	struct mlx5_eqe *eqe = data;
1558 	u16 obj_type;
1559 	u32 obj_id;
1560 
1561 	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1562 		return NOTIFY_DONE;
1563 
1564 	obj_change = &eqe->data.obj_change;
1565 	obj_type = be16_to_cpu(obj_change->obj_type);
1566 	obj_id = be32_to_cpu(obj_change->obj_id);
1567 
1568 	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1569 		return NOTIFY_DONE;
1570 
1571 	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1572 	if (!async_work)
1573 		return NOTIFY_DONE;
1574 
1575 	async_work->macsec = macsec;
1576 	async_work->mdev = macsec->mdev;
1577 	async_work->obj_id = obj_id;
1578 
1579 	INIT_WORK(&async_work->work, macsec_async_event);
1580 
1581 	WARN_ON(!queue_work(macsec->wq, &async_work->work));
1582 
1583 	return NOTIFY_OK;
1584 }
1585 
mlx5e_macsec_aso_init(struct mlx5e_macsec_aso * aso,struct mlx5_core_dev * mdev)1586 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1587 {
1588 	struct mlx5_aso *maso;
1589 	int err;
1590 
1591 	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1592 	if (err) {
1593 		mlx5_core_err(mdev,
1594 			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1595 			      err);
1596 		return err;
1597 	}
1598 
1599 	maso = mlx5_aso_create(mdev, aso->pdn);
1600 	if (IS_ERR(maso)) {
1601 		err = PTR_ERR(maso);
1602 		goto err_aso;
1603 	}
1604 
1605 	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1606 	if (err)
1607 		goto err_aso_reg;
1608 
1609 	mutex_init(&aso->aso_lock);
1610 
1611 	aso->maso = maso;
1612 
1613 	return 0;
1614 
1615 err_aso_reg:
1616 	mlx5_aso_destroy(maso);
1617 err_aso:
1618 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1619 	return err;
1620 }
1621 
mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso * aso,struct mlx5_core_dev * mdev)1622 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1623 {
1624 	if (!aso)
1625 		return;
1626 
1627 	mlx5e_macsec_aso_dereg_mr(mdev, aso);
1628 
1629 	mlx5_aso_destroy(aso->maso);
1630 
1631 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1632 }
1633 
/* MACsec offload callbacks registered on the net_device in
 * mlx5e_macsec_build_netdev(). rx_uses_md_dst indicates Rx SKBs carry
 * their SC via a metadata dst (set in mlx5e_macsec_offload_handle_rx_skb()).
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
	.rx_uses_md_dst = true,
};
1649 
mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec * macsec,struct sk_buff * skb)1650 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1651 {
1652 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1653 	u32 fs_id;
1654 
1655 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1656 							&md_dst->u.macsec_info.sci);
1657 	if (!fs_id)
1658 		goto err_out;
1659 
1660 	return true;
1661 
1662 err_out:
1663 	dev_kfree_skb_any(skb);
1664 	return false;
1665 }
1666 
mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec * macsec,struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg)1667 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1668 				struct sk_buff *skb,
1669 				struct mlx5_wqe_eth_seg *eseg)
1670 {
1671 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1672 	u32 fs_id;
1673 
1674 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1675 							&md_dst->u.macsec_info.sci);
1676 	if (!fs_id)
1677 		return;
1678 
1679 	eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1680 }
1681 
mlx5e_macsec_offload_handle_rx_skb(struct net_device * netdev,struct sk_buff * skb,struct mlx5_cqe64 * cqe)1682 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1683 					struct sk_buff *skb,
1684 					struct mlx5_cqe64 *cqe)
1685 {
1686 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1687 	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1688 	struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
1689 	struct mlx5e_macsec_rx_sc *rx_sc;
1690 	struct mlx5e_macsec *macsec;
1691 	u32  fs_id;
1692 
1693 	macsec = priv->macsec;
1694 	if (!macsec)
1695 		return;
1696 
1697 	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1698 
1699 	rcu_read_lock();
1700 	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1701 	rx_sc = sc_xarray_element->rx_sc;
1702 	if (rx_sc) {
1703 		dst_hold(&rx_sc->md_dst->dst);
1704 		skb_dst_set(skb, &rx_sc->md_dst->dst);
1705 	}
1706 
1707 	rcu_read_unlock();
1708 }
1709 
mlx5e_macsec_build_netdev(struct mlx5e_priv * priv)1710 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1711 {
1712 	struct net_device *netdev = priv->netdev;
1713 
1714 	if (!mlx5e_is_macsec_device(priv->mdev))
1715 		return;
1716 
1717 	/* Enable MACsec */
1718 	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1719 	netdev->macsec_ops = &macsec_offload_ops;
1720 	netdev->features |= NETIF_F_HW_MACSEC;
1721 	netif_keep_dst(netdev);
1722 }
1723 
/* Allocate and attach the MACsec offload context for a netdev.
 *
 * Sets up, in order: the device list and lock, the ASO (used for EPN
 * event handling), an ordered workqueue for object-change events, the SC
 * xarray, the MACsec flow-steering tables, and finally the firmware event
 * notifier. Returns 0 when the device has no MACsec support. On failure
 * the already-initialized pieces are unwound and priv->macsec is left
 * NULL.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* Ordered wq: object-change events are processed one at a time. */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	/* XA_FLAGS_ALLOC1: fs_id 0 is never allocated. */
	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5_macsec_fs_init(mdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	mdev->macsec_fs = macsec_fs;

	/* Register last: events may fire as soon as this succeeds. */
	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}
1785 
/* Detach and free the MACsec offload context (reverse of
 * mlx5e_macsec_init()): unregister the event notifier first so no new
 * async work is queued, tear down the flow-steering tables, destroy the
 * workqueue (which drains pending work), then release the ASO resources.
 * No-op when MACsec was never initialized for this netdev.
 */
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5_macsec_fs_cleanup(mdev->macsec_fs);
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}
1801