// SPDX-License-Identifier: GPL-2.0
/* Marvell MACSEC hardware offload driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <crypto/skcipher.h>
#include <linux/rtnetlink.h>
#include <linux/bitfield.h>
#include "otx2_common.h"

#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)

#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)

#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)

#define MCS_GCM_AES_128			0
#define MCS_GCM_AES_256			1
#define MCS_GCM_AES_XPN_128		2
#define MCS_GCM_AES_XPN_256		3

#define MCS_TCI_ES			0x40 /* end station */
#define MCS_TCI_SC			0x20 /* SCI present */
#define MCS_TCI_SCB			0x10 /* epon */
#define MCS_TCI_E			0x08 /* encryption */
#define MCS_TCI_C			0x04 /* changed text */

#define CN10K_MAX_HASH_LEN		16
#define CN10K_MAX_SAK_LEN		32

/* Encrypt an all-zero block with the SAK, i.e. compute the AES-GCM
 * hash (GHASH) subkey H = AES-ECB(SAK, 0^128), which is programmed
 * into hardware along with the SAK itself.
 */
static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
				 u16 sak_len, u8 *hash)
{
	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
	struct skcipher_request *req = NULL;
	struct scatterlist sg_src, sg_dst;
	struct crypto_skcipher *tfm;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
		return PTR_ERR(tfm);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, sak, sak_len);
	if (err) {
		dev_err(pfvf->dev, "failed to set key for skcipher\n");
		goto free_req;
	}

	/* build sg list */
	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);

	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
				   CN10K_MAX_HASH_LEN, NULL);

	err = crypto_skcipher_encrypt(req);
	err = crypto_wait_req(err, &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy)
{
	struct cn10k_mcs_txsc *txsc;

	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		if (txsc->sw_secy == secy)
			return txsc;
	}

	return NULL;
}

static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
						 struct macsec_secy *secy,
						 struct macsec_rx_sc *rx_sc)
{
	struct cn10k_mcs_rxsc *rxsc;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
			return rxsc;
	}

	return NULL;
}
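/* Resource type name used in mailbox error messages */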
static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
{
	switch (rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		return "FLOW";
	case MCS_RSRC_TYPE_SC:
		return "SC";
	case MCS_RSRC_TYPE_SECY:
		return "SECY";
	case MCS_RSRC_TYPE_SA:
		return "SA";
	default:
		return "Unknown";
	}
}

static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 *rsrc_id)
{
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_alloc_rsrc_req *req;
	struct mcs_alloc_rsrc_rsp *rsp;
	int ret = -ENOMEM;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_type = type;
	req->rsrc_cnt = 1;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							     0, &req->hdr);
	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
		ret = -EINVAL;
		goto fail;
	}

	switch (rsp->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		*rsrc_id = rsp->flow_ids[0];
		break;
	case MCS_RSRC_TYPE_SC:
		*rsrc_id = rsp->sc_ids[0];
		break;
	case MCS_RSRC_TYPE_SECY:
		*rsrc_id = rsp->secy_ids[0];
		break;
	case MCS_RSRC_TYPE_SA:
		*rsrc_id = rsp->sa_ids[0];
		break;
	default:
		ret = -EINVAL;
		goto fail;
	}

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
	return ret;
}

static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
				enum mcs_rsrc_type type, u16 hw_rsrc_id,
				bool all)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_free_rsrc_req *req;

	mutex_lock(&mbox->lock);

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req)
		goto fail;

	clear_req->id = hw_rsrc_id;
	clear_req->type = type;
	clear_req->dir = dir;

	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
	if (!req)
		goto fail;

	req->rsrc_id = hw_rsrc_id;
	req->rsrc_type = type;
	req->dir = dir;
	if (all)
		req->all = 1;

	if (otx2_sync_mbox_msg(&pfvf->mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return;
fail:
	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
	mutex_unlock(&mbox->lock);
}
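/* Thin wrappers over cn10k_mcs_alloc_rsrc()/cn10k_mcs_free_rsrc() for
 * per-direction SA resources.
 */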
"TX" : "RX", rsrc_name(type)); 236 mutex_unlock(&mbox->lock); 237 } 238 239 static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id) 240 { 241 return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id); 242 } 243 244 static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id) 245 { 246 return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id); 247 } 248 249 static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id) 250 { 251 cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false); 252 } 253 254 static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id) 255 { 256 cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false); 257 } 258 259 static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf, 260 struct macsec_secy *secy, u8 hw_secy_id) 261 { 262 struct mcs_secy_plcy_write_req *req; 263 struct mbox *mbox = &pfvf->mbox; 264 u64 policy; 265 u8 cipher; 266 int ret; 267 268 mutex_lock(&mbox->lock); 269 270 req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); 271 if (!req) { 272 ret = -ENOMEM; 273 goto fail; 274 } 275 276 policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window); 277 if (secy->replay_protect) 278 policy |= MCS_RX_SECY_PLCY_RP; 279 280 policy |= MCS_RX_SECY_PLCY_AUTH_ENA; 281 282 switch (secy->key_len) { 283 case 16: 284 cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128; 285 break; 286 case 32: 287 cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256; 288 break; 289 default: 290 cipher = MCS_GCM_AES_128; 291 dev_warn(pfvf->dev, "Unsupported key length\n"); 292 break; 293 } 294 295 policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher); 296 policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames); 297 298 policy |= MCS_RX_SECY_PLCY_ENA; 299 300 req->plcy = policy; 301 req->secy_id = hw_secy_id; 302 req->dir = MCS_RX; 303 304 ret = otx2_sync_mbox_msg(mbox); 305 306 fail: 307 mutex_unlock(&mbox->lock); 308 return ret; 309 } 310 311 static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf, 312 struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) 313 { 314 struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; 315 struct macsec_secy *secy = rxsc->sw_secy; 316 struct mcs_flowid_entry_write_req *req; 317 struct mbox *mbox = &pfvf->mbox; 318 u64 mac_da; 319 int ret; 320 321 mutex_lock(&mbox->lock); 322 323 req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); 324 if (!req) { 325 ret = -ENOMEM; 326 goto fail; 327 } 328 329 mac_da = ether_addr_to_u64(secy->netdev->dev_addr); 330 331 req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da); 332 req->mask[0] = ~0ULL; 333 req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK; 334 335 req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC); 336 req->mask[1] = ~0ULL; 337 req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK; 338 339 req->mask[2] = ~0ULL; 340 req->mask[3] = ~0ULL; 341 342 req->flow_id = rxsc->hw_flow_id; 343 req->secy_id = hw_secy_id; 344 req->sc_id = rxsc->hw_sc_id; 345 req->dir = MCS_RX; 346 347 if (sw_rx_sc->active) 348 req->ena = 1; 349 350 ret = otx2_sync_mbox_msg(mbox); 351 352 fail: 353 mutex_unlock(&mbox->lock); 354 return ret; 355 } 356 357 static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf, 358 struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) 359 { 360 struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; 361 struct mcs_rx_sc_cam_write_req *sc_req; 362 struct mbox *mbox = &pfvf->mbox; 363 int ret; 364 365 mutex_lock(&mbox->lock); 366 367 sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox); 368 if (!sc_req) { 369 ret = -ENOMEM; 370 goto fail; 371 
static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
{
	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
	struct mcs_rx_sc_cam_write_req *sc_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
	if (!sc_req) {
		ret = -ENOMEM;
		goto fail;
	}

	sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
	sc_req->sc_id = rxsc->hw_sc_id;
	sc_req->secy_id = hw_secy_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
				struct macsec_secy *secy,
				struct mcs_sa_plcy_write_req *req,
				u8 *sak, u8 *salt, ssci_t ssci)
{
	u8 hash_rev[CN10K_MAX_HASH_LEN];
	u8 sak_rev[CN10K_MAX_SAK_LEN];
	u8 salt_rev[MACSEC_SALT_LEN];
	u8 hash[CN10K_MAX_HASH_LEN];
	u32 ssci_63_32;
	int err, i;

	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
	if (err) {
		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
		return err;
	}

	/* Hardware expects the SAK, hash and salt in reversed byte order */
	for (i = 0; i < secy->key_len; i++)
		sak_rev[i] = sak[secy->key_len - 1 - i];

	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];

	for (i = 0; i < MACSEC_SALT_LEN; i++)
		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];

	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);

	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
	req->plcy[0][7] |= (u64)ssci_63_32 << 32;

	return 0;
}

static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_rxsc *rxsc,
				      u8 assoc_num, bool sa_in_use)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = rxsc->sa_key[assoc_num];
	u8 *salt = rxsc->salt[assoc_num];
	struct mcs_rx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, rxsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_RX;

	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
	map_req->sa_in_use = sa_in_use;
	map_req->sc_id = rxsc->hw_sc_id;
	map_req->an = assoc_num;

	/* Send two messages together */
	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
				    struct cn10k_mcs_rxsc *rxsc,
				    u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = rxsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_RX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}
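/* Build and write the TX SecY policy: SecTag TCI bits and offset, MTU,
 * cipher suite and the protect/enable flags.
 */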
static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc)
{
	struct mcs_secy_plcy_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	struct macsec_tx_sc *sw_tx_sc;
	u8 sectag_tci = 0;
	u8 tag_offset;
	u64 policy;
	u8 cipher;
	int ret;

	/* Insert SecTag after 12 bytes (DA+SA) or 16 bytes
	 * if VLAN tag needs to be sent in clear text.
	 */
	tag_offset = txsc->vlan_dev ? 16 : 12;
	sw_tx_sc = &secy->tx_sc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	if (sw_tx_sc->send_sci) {
		sectag_tci |= MCS_TCI_SC;
	} else {
		if (sw_tx_sc->end_station)
			sectag_tci |= MCS_TCI_ES;
		if (sw_tx_sc->scb)
			sectag_tci |= MCS_TCI_SCB;
	}

	if (sw_tx_sc->encrypt)
		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);

	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
	/* Write SecTag excluding AN bits (1..0) */
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
	policy |= MCS_TX_SECY_PLCY_INS_MODE;
	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;

	switch (secy->key_len) {
	case 16:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128;
		break;
	case 32:
		cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256;
		break;
	default:
		cipher = MCS_GCM_AES_128;
		dev_warn(pfvf->dev, "Unsupported key length\n");
		break;
	}

	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher);

	if (secy->protect_frames)
		policy |= MCS_TX_SECY_PLCY_PROTECT;

	/* If the encoding SA does not exist/is not active and protect is
	 * not set then frames can be sent out as is. Hence enable the
	 * policy irrespective of secy operational state when !protect.
	 */
	if (!secy->protect_frames || secy->operational)
		policy |= MCS_TX_SECY_PLCY_ENA;

	req->plcy = policy;
	req->secy_id = txsc->hw_secy_id_tx;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
				     struct macsec_secy *secy,
				     struct cn10k_mcs_txsc *txsc)
{
	struct mcs_flowid_entry_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	u64 mac_sa;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);

	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);

	req->mask[0] = ~0ULL;
	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;

	req->mask[1] = ~0ULL;
	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;

	req->mask[2] = ~0ULL;
	req->mask[3] = ~0ULL;

	req->flow_id = txsc->hw_flow_id;
	req->secy_id = txsc->hw_secy_id_tx;
	req->sc_id = txsc->hw_sc_id;
	req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	req->dir = MCS_TX;
	/* This can be enabled since stack xmits packets only when interface is up */
	req->ena = 1;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}
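/* Map a TX SA to its SC. Hardware transmits with the SA mapped for the
 * current encoding SA only, so other SAs are not linked here.
 */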
static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   struct cn10k_mcs_txsc *txsc,
				   u8 sa_num, bool sa_active)
{
	struct mcs_tx_sc_sa_map *map_req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	/* Link the encoding_sa only to SC out of all SAs */
	if (txsc->encoding_sa != sa_num)
		return 0;

	mutex_lock(&mbox->lock);

	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
	if (!map_req) {
		otx2_mbox_reset(&mbox->mbox, 0);
		ret = -ENOMEM;
		goto fail;
	}

	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
	map_req->sa_index0_vld = sa_active;
	map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
	map_req->sc_id = txsc->hw_sc_id;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
				      struct macsec_secy *secy,
				      struct cn10k_mcs_txsc *txsc,
				      u8 assoc_num)
{
	struct mcs_sa_plcy_write_req *plcy_req;
	u8 *sak = txsc->sa_key[assoc_num];
	u8 *salt = txsc->salt[assoc_num];
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
	if (!plcy_req) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
				   salt, txsc->ssci[assoc_num]);
	if (ret)
		goto fail;

	plcy_req->plcy[0][8] = assoc_num;
	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
	plcy_req->sa_cnt = 1;
	plcy_req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
				struct cn10k_mcs_txsc *txsc,
				u8 assoc_num, u64 next_pn)
{
	struct mcs_pn_table_write_req *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->pn_id = txsc->hw_sa_id[assoc_num];
	req->next_pn = next_pn;
	req->dir = MCS_TX;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
				    bool enable, enum mcs_direction dir)
{
	struct mcs_flowid_ena_dis_entry *req;
	struct mbox *mbox = &pfvf->mbox;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->flow_id = hw_flow_id;
	req->ena = enable;
	req->dir = dir;

	ret = otx2_sync_mbox_msg(mbox);

fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
			      struct mcs_sa_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sa_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sa_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sa_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SA;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}
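/* Fetch SC/SecY statistics from AF, optionally queueing a clear-stats
 * request in the same mailbox transaction.
 */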
static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
			      struct mcs_sc_stats *rsp_p,
			      enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_stats_req *req;
	struct mcs_sc_stats *rsp;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_sc_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_sc_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SC;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						       0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}

static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
				struct mcs_secy_stats *rsp_p,
				enum mcs_direction dir, bool clear)
{
	struct mcs_clear_stats *clear_req;
	struct mbox *mbox = &pfvf->mbox;
	struct mcs_secy_stats *rsp;
	struct mcs_stats_req *req;
	int ret;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
	if (!req) {
		ret = -ENOMEM;
		goto fail;
	}

	req->id = hw_secy_id;
	req->dir = dir;

	if (!clear)
		goto send_msg;

	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
	if (!clear_req) {
		ret = -ENOMEM;
		goto fail;
	}
	clear_req->id = hw_secy_id;
	clear_req->dir = dir;
	clear_req->type = MCS_RSRC_TYPE_SECY;

send_msg:
	ret = otx2_sync_mbox_msg(mbox);
	if (ret)
		goto fail;

	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
							 0, &req->hdr);
	if (IS_ERR(rsp)) {
		ret = PTR_ERR(rsp);
		goto fail;
	}

	memcpy(rsp_p, rsp, sizeof(*rsp_p));

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	mutex_unlock(&mbox->lock);
	return ret;
}
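/* Allocate all hardware resources backing a SecY's transmit side: a
 * TCAM flow, TX and RX SecY slots and a TX SC, unwinding on failure.
 */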
static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_txsc *txsc;
	int ret;

	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
	if (!txsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
				   &txsc->hw_flow_id);
	if (ret)
		goto fail;

	/* For a SecY, one TX secy and one RX secy HW resources are needed */
	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_tx);
	if (ret)
		goto free_flowid;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
				   &txsc->hw_secy_id_rx);
	if (ret)
		goto free_tx_secy;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
				   &txsc->hw_sc_id);
	if (ret)
		goto free_rx_secy;

	return txsc;
free_rx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
free_tx_secy:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
fail:
	kfree(txsc);
	return ERR_PTR(ret);
}

/* Free Tx SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_txsc *txsc)
{
	u8 sa_bmap = txsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
						   txsc, sa_num);
			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
			    txsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_rx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
			    txsc->hw_secy_id_tx, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
			    txsc->hw_flow_id, false);
}

static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
{
	struct cn10k_mcs_rxsc *rxsc;
	int ret;

	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
	if (!rxsc)
		return ERR_PTR(-ENOMEM);

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
				   &rxsc->hw_flow_id);
	if (ret)
		goto fail;

	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
				   &rxsc->hw_sc_id);
	if (ret)
		goto free_flowid;

	return rxsc;
free_flowid:
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
fail:
	kfree(rxsc);
	return ERR_PTR(ret);
}

/* Free Rx SC and its SAs (if any) resources to AF */
static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
				  struct cn10k_mcs_rxsc *rxsc)
{
	u8 sa_bmap = rxsc->sa_bmap;
	u8 sa_num = 0;

	while (sa_bmap) {
		if (sa_bmap & 1) {
			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
						   sa_num, false);
			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
		}
		sa_num++;
		sa_bmap >>= 1;
	}

	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
			    rxsc->hw_sc_id, false);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
			    rxsc->hw_flow_id, false);
}

static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc,
				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
{
	if (sw_tx_sa) {
		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn);
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
					sw_tx_sa->active);
	}

	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
	/* When updating secy, change RX secy also */
	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);

	return 0;
}
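/* Reprogram all RX SCs and their SAs of a SecY into hardware, used
 * when the SecY configuration is (re)applied.
 */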
static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
				 struct macsec_secy *secy, u8 hw_secy_id)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	struct macsec_rx_sa *sw_rx_sa;
	u8 sa_num;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
			if (!sw_rx_sa)
				continue;

			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
						   sa_num, sw_rx_sa->active);
			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
						 sw_rx_sa->next_pn);
		}

		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
	}

	return 0;
}

static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
				   struct macsec_secy *secy,
				   bool delete)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *mcs_rx_sc;
	struct macsec_rx_sc *sw_rx_sc;
	int ret;

	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
		if (unlikely(!mcs_rx_sc))
			continue;

		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
					       false, MCS_RX);
		if (ret)
			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
				mcs_rx_sc->hw_sc_id);
		if (delete) {
			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
			list_del(&mcs_rx_sc->entry);
			kfree(mcs_rx_sc);
		}
	}

	return 0;
}

static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
				 struct cn10k_mcs_txsc *txsc)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_secy_stats rx_rsp = { 0 };
	struct mcs_sc_stats sc_rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	/* Because of shared counters for some stats in the hardware, when
	 * updating secy policy take a snapshot of current stats and reset
	 * them. Below are the stats affected by shared counters.
	 */

	/* Check if sync is really needed */
	if (secy->validate_frames == txsc->last_validate_frames &&
	    secy->replay_protect == txsc->last_replay_protect)
		return;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);

	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;

	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);

		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;

		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;

		if (txsc->last_replay_protect)
			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
		else
			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;

		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
		else
			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
	}

	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
}
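/* mdo_* callbacks below are invoked by the core MACsec offload
 * framework (struct macsec_ops).
 */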
static int cn10k_mdo_open(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	sa_num = txsc->encoding_sa;
	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);

	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
	if (err)
		return err;

	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
}

static int cn10k_mdo_stop(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	if (err)
		return err;

	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
}

static int cn10k_mdo_add_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		return -EOPNOTSUPP;

	txsc = cn10k_mcs_create_txsc(pfvf);
	if (IS_ERR(txsc))
		return -ENOSPC;

	txsc->sw_secy = secy;
	txsc->encoding_sa = secy->tx_sc.encoding_sa;
	txsc->last_validate_frames = secy->validate_frames;
	txsc->last_replay_protect = secy->replay_protect;
	txsc->vlan_dev = is_vlan_dev(ctx->netdev);

	list_add(&txsc->entry, &cfg->txsc_list);

	if (netif_running(secy->netdev))
		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);

	return 0;
}
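/* SecY update: re-link the SC if the encoding SA changed and, when the
 * device is up, snapshot shared counters before rewriting the policy.
 */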
static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct macsec_tx_sa *sw_tx_sa;
	struct cn10k_mcs_txsc *txsc;
	bool active;
	u8 sa_num;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	/* Encoding SA got changed */
	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
		txsc->encoding_sa = secy->tx_sc.encoding_sa;
		sa_num = txsc->encoding_sa;
		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
		active = sw_tx_sa ? sw_tx_sa->active : false;
		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
	}

	if (netif_running(secy->netdev)) {
		cn10k_mcs_sync_stats(pfvf, secy, txsc);

		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_secy(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
	cn10k_mcs_delete_txsc(pfvf, txsc);
	list_del(&txsc->entry);
	kfree(txsc);

	return 0;
}

static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
	memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	txsc->ssci[sa_num] = sw_tx_sa->ssci;

	txsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
		if (err)
			return err;

		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		/* Keys cannot be changed after creation */
		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
					   sw_tx_sa->next_pn);
		if (err)
			return err;

		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
					      sa_num, sw_tx_sa->active);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
	txsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}
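/* An RX SC is created only after the TX-side SecY exists, since its
 * flow and SC CAM entries are written against that SecY's RX slot.
 */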
static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_rxsc *rxsc;
	struct cn10k_mcs_txsc *txsc;
	int err;

	txsc = cn10k_mcs_get_txsc(cfg, secy);
	if (!txsc)
		return -ENOENT;

	rxsc = cn10k_mcs_create_rxsc(pfvf);
	if (IS_ERR(rxsc))
		return -ENOSPC;

	rxsc->sw_secy = ctx->secy;
	rxsc->sw_rxsc = ctx->rx_sc;
	list_add(&rxsc->entry, &cfg->rxsc_list);

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;

		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	bool enable = ctx->rx_sc->active;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (netif_running(secy->netdev))
		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
						enable, MCS_RX);

	return 0;
}

static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
	cn10k_mcs_delete_rxsc(pfvf, rxsc);
	list_del(&rxsc->entry);
	kfree(rxsc);

	return 0;
}

static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
		return -ENOSPC;

	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
	memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN);
	rxsc->ssci[sa_num] = rx_sa->ssci;

	rxsc->sa_bmap |= 1 << sa_num;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
						 sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}
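/* Keys cannot be changed after SA creation; an update only refreshes
 * the SA's in-use state and next packet number.
 */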
static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
	struct macsec_secy *secy = ctx->secy;
	bool sa_in_use = rx_sa->active;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;
	int err;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	if (netif_running(secy->netdev)) {
		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
		if (err)
			return err;

		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
					       rx_sa->next_pn);
		if (err)
			return err;
	}

	return 0;
}

static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);

	rxsc->sa_bmap &= ~(1 << sa_num);

	return 0;
}

static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;

	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
	else
		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
	txsc->stats.InPktsOverrun = 0;

	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;

	return 0;
}

static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);

	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;

	return 0;
}

static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_txsc *txsc;

	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
	if (!txsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);

	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;

	return 0;
}
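/* RX SC counters are read clear-on-read and accumulated in software,
 * since the same hardware counter feeds different MIB entries
 * depending on the current validate_frames/replay_protect settings.
 */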
static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_secy *secy = ctx->secy;
	struct mcs_sc_stats rsp = { 0 };
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
	if (!rxsc)
		return -ENOENT;

	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);

	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;

	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;

	if (secy->replay_protect)
		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
	else
		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;

	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
	else
		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;

	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;

	return 0;
}

static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
	struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev);
	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct mcs_sa_stats rsp = { 0 };
	u8 sa_num = ctx->sa.assoc_num;
	struct cn10k_mcs_rxsc *rxsc;

	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
	if (!rxsc)
		return -ENOENT;

	if (sa_num >= CN10K_MCS_SA_PER_SC)
		return -EOPNOTSUPP;

	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);

	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;

	return 0;
}

static const struct macsec_ops cn10k_mcs_ops = {
	.mdo_dev_open = cn10k_mdo_open,
	.mdo_dev_stop = cn10k_mdo_stop,
	.mdo_add_secy = cn10k_mdo_add_secy,
	.mdo_upd_secy = cn10k_mdo_upd_secy,
	.mdo_del_secy = cn10k_mdo_del_secy,
	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
	.mdo_add_txsa = cn10k_mdo_add_txsa,
	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
	.mdo_del_txsa = cn10k_mdo_del_txsa,
	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
};
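/* Handle the TX PN-wrap interrupt from AF: find the SecY owning the
 * expired hardware SA and notify the core via macsec_pn_wrapped().
 */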
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
{
	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
	struct macsec_tx_sa *sw_tx_sa = NULL;
	struct macsec_secy *secy = NULL;
	struct cn10k_mcs_txsc *txsc;
	u8 an;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
		return;

	/* Find the SecY to which the expired hardware SA is mapped */
	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
			if (txsc->hw_sa_id[an] == event->sa_id) {
				secy = txsc->sw_secy;
				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
			}
	}

	if (secy && sw_tx_sa)
		macsec_pn_wrapped(secy, sw_tx_sa);
}

int cn10k_mcs_init(struct otx2_nic *pfvf)
{
	struct mbox *mbox = &pfvf->mbox;
	struct cn10k_mcs_cfg *cfg;
	struct mcs_intr_cfg *req;

	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&cfg->txsc_list);
	INIT_LIST_HEAD(&cfg->rxsc_list);
	pfvf->macsec_cfg = cfg;

	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
	if (!req)
		goto fail;

	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	if (otx2_sync_mbox_msg(mbox))
		goto fail;

	mutex_unlock(&mbox->lock);

	return 0;
fail:
	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
	mutex_unlock(&mbox->lock);
	return 0;
}

void cn10k_mcs_free(struct otx2_nic *pfvf)
{
	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
		return;

	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
	kfree(pfvf->macsec_cfg);
	pfvf->macsec_cfg = NULL;
}