// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

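/* Allocate a single hardware resource of @type for the given direction
 * from the AF via mailbox and return its hardware id in @rsrc_id.
 */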
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

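/* Clear the stats of a hardware resource and free it back to the AF.
 * When @all is set, all resources of @type owned by this PF/VF are freed.
 */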
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

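/* Program the RX secy policy: replay window and protection, cipher
 * suite and frame validation mode, then mark the policy enabled.
 */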
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	u8 cipher;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

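/* Add a TCAM rule to steer MACsec frames carrying this netdev's MAC as
 * destination address and the MACsec EtherType to the given RX SC.
 */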
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

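/* Map the SCI of the software RX SC to its hardware SC index via SC CAM */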
static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

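/* Write the RX SA key (plus salt and SSCI for XPN ciphers) and map the
 * SA to its SC and association number in one mailbox transaction.
 */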
static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	unsigned char *src = rxsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *salt_p = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	u64 ssci_salt_95_64 = 0;
	u8 reg, key_len;
	u64 salt_63_0;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg],
		       (src + reg * 8), 8);
		reg++;
	}

	if (secy->xpn) {
		memcpy((u8 *)&salt_63_0, salt_p, 8);
		memcpy((u8 *)&ssci_salt_95_64, salt_p + 8, 4);
		ssci_salt_95_64 |= (__force u64)rxsc->ssci[assoc_num] << 32;

		plcy_req->plcy[0][6] = salt_63_0;
		plcy_req->plcy[0][7] = ssci_salt_95_64;
	}

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

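/* Program the TX secy policy: MTU, SecTag TCI bits, SecTag insertion
 * offset, cipher suite and the protect-frames setting.
 */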
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA+SA) or 16 bytes
	 * if VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is not active and protect
	 * is not set, then frames can be sent out as-is. Hence enable the
	 * policy irrespective of the secy being operational when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

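/* Add a TCAM rule matching frames sourced from this netdev's MAC
 * address so they are handled by the TX secy and SC.
 */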
static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since stack xmits packets only when interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

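/* Attach only the current encoding SA of the secy to the TX SC */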
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Link the encoding_sa only to SC out of all SAs */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

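/* Write the TX SA key (plus salt and SSCI for XPN ciphers) to hardware */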
static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	unsigned char *src = txsc->sa_key[assoc_num];
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *salt_p = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	u64 ssci_salt_95_64 = 0;
	u8 reg, key_len;
	u64 salt_63_0;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
		reg++;
	}

	if (secy->xpn) {
		memcpy((u8 *)&salt_63_0, salt_p, 8);
		memcpy((u8 *)&ssci_salt_95_64, salt_p + 8, 4);
		ssci_salt_95_64 |= (__force u64)txsc->ssci[assoc_num] << 32;

		plcy_req->plcy[0][6] = salt_63_0;
		plcy_req->plcy[0][7] = ssci_salt_95_64;
	}

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

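/* Enable or disable an already programmed TCAM flow entry */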
static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

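/* Fetch SA statistics, optionally clearing the hardware counters within
 * the same mailbox transaction; the SC and secy stats helpers below
 * follow the same pattern.
 */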
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

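/* Reserve the hardware resources (flow id, TX/RX secy and SC) needed
 * to back one software TX SC.
 */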
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free a Tx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free an Rx SC and its SA resources (if any) back to the AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating secy, change RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Some hardware counters are shared between stats. When the secy
	 * policy is updated, take a snapshot of the current stats and reset
	 * the counters. The stats updated below are the ones affected by
	 * the shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

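/* Offload callbacks invoked by the core MACsec driver */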
static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

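/* Handle the AF interrupt notifying that a TX SA's packet number (XPN)
 * wrapped to zero: find the owning SecY/SA and report it to the MACsec
 * core via macsec_pn_wrapped().
 */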
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

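/* Advertise MACsec offload support and subscribe to PN wrap interrupts */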
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}