// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <crypto/skcipher.h>
#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

#define CN10K_MAX_HASH_LEN		16
#define CN10K_MAX_SAK_LEN		32

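/* Encrypt one all-zero 16-byte block with the SAK using ECB(AES).
 * Single-block ECB is plain AES, so the result is E_K(0^128), the GHASH
 * subkey that AES-GCM derives from the key; cn10k_mcs_write_keys()
 * programs it into the SA policy alongside the SAK.
 */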
static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
				 u16 sak_len, u8 *hash)
{
	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
	struct skcipher_request *req = NULL;
	struct scatterlist sg_src, sg_dst;
	struct crypto_skcipher *tfm;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
		return PTR_ERR(tfm);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, sak, sak_len);
	if (err) {
		dev_err(pfvf->dev, "failed to set key for skcipher\n");
		goto free_req;
	}

	/* build sg list */
	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);

	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
				   CN10K_MAX_HASH_LEN, NULL);

	err = crypto_skcipher_encrypt(req);
	err = crypto_wait_req(err, &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}

static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}

	return "Unknown";
}

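/* Ask the AF, over the mailbox, for one hardware resource of @type in
 * direction @dir; on success the allocated hardware index is returned
 * through @rsrc_id. The response is validated against the request
 * before the index is trusted.
 */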
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt  = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

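/* Return a hardware resource to the AF. A stats-clear request is queued
 * in the same mailbox batch as the free request so the next user of
 * this index does not inherit stale counters.
 */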
static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}

static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
{
	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
}

static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
{
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
}

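/* Pack the software SecY state (replay window/protection, cipher suite,
 * frame validation mode) into the 64-bit RX SECY policy word and write
 * it to the given hardware SECY slot.
 */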
static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy, u8 hw_secy_id)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 policy;
	u8 cipher;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
	if (secy->replay_protect)
		policy |= MCS_RX_SECY_PLCY_RP;

	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher);
	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);

	policy |= MCS_RX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = hw_secy_id;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

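/* Program the RX TCAM entry that steers traffic into this SC: all mask
 * bits are set except those covering the destination MAC and the MACsec
 * EtherType (0x88E5), which are the fields the entry compares.
 */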
static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct macsec_secy *secy = rxsc->sw_secy;
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_da;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;

	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = rxsc->hw_flow_id;
	req->secy_id = hw_secy_id;
	req->sc_id = rxsc->hw_sc_id;
	req->dir = MCS_RX;

	if (sw_rx_sc->active)
		req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

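/* Lay out key material in the SA policy the way the hardware expects:
 * the SAK, the ECB(AES) hash of a zero block, and the XPN salt are all
 * byte-reversed, and the 32-bit SSCI lands in the upper half of policy
 * word 7. Offsets are in 64-bit words: key at word 0, hash at word 4,
 * salt at word 6.
 */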
static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
				struct macsec_secy *secy,
				struct mcs_sa_plcy_write_req *req,
				u8 *sak, u8 *salt, ssci_t ssci)
{
	u8 hash_rev[CN10K_MAX_HASH_LEN];
	u8 sak_rev[CN10K_MAX_SAK_LEN];
	u8 salt_rev[MACSEC_SALT_LEN];
	u8 hash[CN10K_MAX_HASH_LEN];
	u32 ssci_63_32;
	int err, i;

	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
	if (err) {
		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
		return err;
	}

	for (i = 0; i < secy->key_len; i++)
		sak_rev[i] = sak[secy->key_len - 1 - i];

	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];

	for (i = 0; i < MACSEC_SALT_LEN; i++)
		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];

	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);

	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
	req->plcy[0][7] |= (u64)ssci_63_32 << 32;

	return 0;
}

static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = rxsc->sa_key[assoc_num];
	u8 *salt = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, rxsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

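/* Build the 64-bit TX SECY policy word: MTU, SecTag TCI bits (the AN
 * field is supplied per-SA, so only TCI bits 7..2 are programmed here),
 * tag insertion offset, cipher suite and protect/enable flags.
 */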
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA+SA) or 16 bytes
	 * if the VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
			    pfvf->netdev->mtu + OTX2_ETH_HLEN);
	/* Write SecTag excluding AN bits(1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist or is inactive and protect is
	 * not set, then frames can be sent out as is. Hence enable the
	 * policy irrespective of secy->operational when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

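/* TX TCAM entry for this SecY: match on the source MAC, which straddles
 * two TCAM data words (low 16 bits in word 0, remaining 32 bits in
 * word 1).
 */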
static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since the stack transmits packets only when
	 * the interface is up.
	 */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Out of all the SAs, only the encoding SA is linked to the SC */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = txsc->sa_key[assoc_num];
	u8 *salt = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, txsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

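/* The three helpers below fetch SA, SC and SECY counters from the AF;
 * when @clear is set, a clear request rides in the same mailbox batch
 * so the snapshot and the reset happen together.
 */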
static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

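/* Allocate the bundle of hardware resources that backs one software
 * SecY: a TX flow id, a TX SECY slot, the paired RX SECY slot and a
 * TX SC, unwinding everything already taken on failure.
 */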
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resource are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free a Tx SC and its SAs (if any) back to the AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free an Rx SC and its SAs (if any) back to the AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

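/* Replay the full TX-side configuration of a SecY into hardware: the
 * encoding SA (if one exists), the SECY policy and the flow id. The
 * paired RX SECY slot is refreshed too, since one software SecY owns
 * both directions.
 */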
static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating the secy, change the RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}

static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because the hardware shares counters between some stats, take a
	 * snapshot of the current stats and reset them when updating the
	 * secy policy. The stats below are the ones affected by shared
	 * counters.
	 */

	/* Check if a sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}

static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}

static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		if (ctx->sa.update_pn) {
			err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
						   sw_tx_sa->next_pn);
			if (err)
				return err;
		}

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		if (!ctx->sa.update_pn)
			return 0;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

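/* Offload entry points invoked by the macsec core once the netdev
 * advertises NETIF_F_HW_MACSEC, e.g. when userspace selects MAC-level
 * offload with something like "ip macsec offload macsec0 mac"
 * (iproute2 syntax, shown only as an illustration).
 */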
static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};

void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

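/* Advertise MACsec offload on the netdev and ask the AF to forward the
 * TX XPN-expiry interrupt so cn10k_handle_mcs_event() can notify the
 * macsec core via macsec_pn_wrapped().
 */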
int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}