1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Atlantic Network Driver 3 * Copyright (C) 2020 Marvell International Ltd. 4 */ 5 6 #include "aq_macsec.h" 7 #include "aq_nic.h" 8 #include <linux/rtnetlink.h> 9 10 #include "macsec/macsec_api.h" 11 #define AQ_MACSEC_KEY_LEN_128_BIT 16 12 #define AQ_MACSEC_KEY_LEN_192_BIT 24 13 #define AQ_MACSEC_KEY_LEN_256_BIT 32 14 15 enum aq_clear_type { 16 /* update HW configuration */ 17 AQ_CLEAR_HW = BIT(0), 18 /* update SW configuration (busy bits, pointers) */ 19 AQ_CLEAR_SW = BIT(1), 20 /* update both HW and SW configuration */ 21 AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW, 22 }; 23 24 static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx, 25 enum aq_clear_type clear_type); 26 static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc, 27 const int sa_num, enum aq_clear_type clear_type); 28 static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx, 29 enum aq_clear_type clear_type); 30 static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc, 31 const int sa_num, enum aq_clear_type clear_type); 32 static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy, 33 enum aq_clear_type clear_type); 34 static int aq_apply_macsec_cfg(struct aq_nic_s *nic); 35 static int aq_apply_secy_cfg(struct aq_nic_s *nic, 36 const struct macsec_secy *secy); 37 38 static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac) 39 { 40 u32 tmp[2] = { 0 }; 41 42 memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN); 43 44 mac[0] = swab32(tmp[1]); 45 mac[1] = swab32(tmp[0]); 46 } 47 48 /* There's a 1:1 mapping between SecY and TX SC */ 49 static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg, 50 const struct macsec_secy *secy) 51 { 52 int i; 53 54 if (unlikely(!secy)) 55 return -1; 56 57 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 58 if (macsec_cfg->aq_txsc[i].sw_secy == secy) 59 return i; 60 } 61 return -1; 62 } 63 64 static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg, 65 const struct macsec_rx_sc *rxsc) 66 { 67 int i; 68 69 if (unlikely(!rxsc)) 70 return -1; 71 72 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 73 if (macsec_cfg->aq_rxsc[i].sw_rxsc == rxsc) 74 return i; 75 } 76 77 return -1; 78 } 79 80 static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa, 81 const int sc_idx) 82 { 83 switch (sc_sa) { 84 case aq_macsec_sa_sc_4sa_8sc: 85 return sc_idx >> 2; 86 case aq_macsec_sa_sc_2sa_16sc: 87 return sc_idx >> 1; 88 case aq_macsec_sa_sc_1sa_32sc: 89 return sc_idx; 90 default: 91 WARN_ONCE(true, "Invalid sc_sa"); 92 } 93 return -1; 94 } 95 96 /* Rotate keys u32[8] */ 97 static void aq_rotate_keys(u32 (*key)[8], const int key_len) 98 { 99 u32 tmp[8] = { 0 }; 100 101 memcpy(&tmp, key, sizeof(tmp)); 102 memset(*key, 0, sizeof(*key)); 103 104 if (key_len == AQ_MACSEC_KEY_LEN_128_BIT) { 105 (*key)[0] = swab32(tmp[3]); 106 (*key)[1] = swab32(tmp[2]); 107 (*key)[2] = swab32(tmp[1]); 108 (*key)[3] = swab32(tmp[0]); 109 } else if (key_len == AQ_MACSEC_KEY_LEN_192_BIT) { 110 (*key)[0] = swab32(tmp[5]); 111 (*key)[1] = swab32(tmp[4]); 112 (*key)[2] = swab32(tmp[3]); 113 (*key)[3] = swab32(tmp[2]); 114 (*key)[4] = swab32(tmp[1]); 115 (*key)[5] = swab32(tmp[0]); 116 } else if (key_len == AQ_MACSEC_KEY_LEN_256_BIT) { 117 (*key)[0] = swab32(tmp[7]); 118 (*key)[1] = swab32(tmp[6]); 119 (*key)[2] = swab32(tmp[5]); 120 (*key)[3] = swab32(tmp[4]); 121 (*key)[4] = swab32(tmp[3]); 122 (*key)[5] = swab32(tmp[2]); 123 (*key)[6] = swab32(tmp[1]); 124 (*key)[7] = swab32(tmp[0]); 125 } else { 126 pr_warn("Rotate_keys: invalid key_len\n"); 127 } 128 } 129 130 #define STATS_2x32_TO_64(stat_field) \ 131 (((u64)stat_field[1] << 32) | stat_field[0]) 132 133 static int aq_get_macsec_common_stats(struct aq_hw_s *hw, 134 struct aq_macsec_common_stats *stats) 135 { 136 struct aq_mss_ingress_common_counters ingress_counters; 137 struct aq_mss_egress_common_counters egress_counters; 138 int ret; 139 140 /* MACSEC counters */ 141 ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters); 142 if (unlikely(ret)) 143 return ret; 144 145 stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts); 146 stats->in.tagged_miss_pkts = 147 STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts); 148 stats->in.untagged_miss_pkts = 149 STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts); 150 stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts); 151 stats->in.untagged_pkts = 152 STATS_2x32_TO_64(ingress_counters.untagged_pkts); 153 stats->in.bad_tag_pkts = 154 STATS_2x32_TO_64(ingress_counters.bad_tag_pkts); 155 stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts); 156 stats->in.unknown_sci_pkts = 157 STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts); 158 stats->in.ctrl_prt_pass_pkts = 159 STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts); 160 stats->in.unctrl_prt_pass_pkts = 161 STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts); 162 stats->in.ctrl_prt_fail_pkts = 163 STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts); 164 stats->in.unctrl_prt_fail_pkts = 165 STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts); 166 stats->in.too_long_pkts = 167 STATS_2x32_TO_64(ingress_counters.too_long_pkts); 168 stats->in.igpoc_ctl_pkts = 169 STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts); 170 stats->in.ecc_error_pkts = 171 STATS_2x32_TO_64(ingress_counters.ecc_error_pkts); 172 stats->in.unctrl_hit_drop_redir = 173 STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir); 174 175 ret = aq_mss_get_egress_common_counters(hw, &egress_counters); 176 if (unlikely(ret)) 177 return ret; 178 stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt); 179 stats->out.unknown_sa_pkts = 180 STATS_2x32_TO_64(egress_counters.unknown_sa_pkts); 181 stats->out.untagged_pkts = 182 STATS_2x32_TO_64(egress_counters.untagged_pkts); 183 stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long); 184 stats->out.ecc_error_pkts = 185 STATS_2x32_TO_64(egress_counters.ecc_error_pkts); 186 stats->out.unctrl_hit_drop_redir = 187 STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir); 188 189 return 0; 190 } 191 192 static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx, 193 struct aq_macsec_rx_sa_stats *stats) 194 { 195 struct aq_mss_ingress_sa_counters i_sa_counters; 196 int ret; 197 198 ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx); 199 if (unlikely(ret)) 200 return ret; 201 202 stats->untagged_hit_pkts = 203 STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts); 204 stats->ctrl_hit_drop_redir_pkts = 205 STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts); 206 stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa); 207 stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa); 208 stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts); 209 stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts); 210 stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts); 211 stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts); 212 stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts); 213 stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts); 214 stats->validated_octets = 215 STATS_2x32_TO_64(i_sa_counters.validated_octets); 216 stats->decrypted_octets = 217 STATS_2x32_TO_64(i_sa_counters.decrypted_octets); 218 219 return 0; 220 } 221 222 static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx, 223 struct aq_macsec_tx_sa_stats *stats) 224 { 225 struct aq_mss_egress_sa_counters e_sa_counters; 226 int ret; 227 228 ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx); 229 if (unlikely(ret)) 230 return ret; 231 232 stats->sa_hit_drop_redirect = 233 STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect); 234 stats->sa_protected2_pkts = 235 STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts); 236 stats->sa_protected_pkts = 237 STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts); 238 stats->sa_encrypted_pkts = 239 STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts); 240 241 return 0; 242 } 243 244 static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn) 245 { 246 struct aq_mss_egress_sa_record sa_rec; 247 int ret; 248 249 ret = aq_mss_get_egress_sa_record(hw, &sa_rec, sa_idx); 250 if (likely(!ret)) 251 *pn = sa_rec.next_pn; 252 253 return ret; 254 } 255 256 static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn) 257 { 258 struct aq_mss_ingress_sa_record sa_rec; 259 int ret; 260 261 ret = aq_mss_get_ingress_sa_record(hw, &sa_rec, sa_idx); 262 if (likely(!ret)) 263 *pn = (!sa_rec.sat_nextpn) ? sa_rec.next_pn : 0; 264 265 return ret; 266 } 267 268 static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx, 269 struct aq_macsec_tx_sc_stats *stats) 270 { 271 struct aq_mss_egress_sc_counters e_sc_counters; 272 int ret; 273 274 ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx); 275 if (unlikely(ret)) 276 return ret; 277 278 stats->sc_protected_pkts = 279 STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts); 280 stats->sc_encrypted_pkts = 281 STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts); 282 stats->sc_protected_octets = 283 STATS_2x32_TO_64(e_sc_counters.sc_protected_octets); 284 stats->sc_encrypted_octets = 285 STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets); 286 287 return 0; 288 } 289 290 static int aq_mdo_dev_open(struct macsec_context *ctx) 291 { 292 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 293 int ret = 0; 294 295 if (ctx->prepare) 296 return 0; 297 298 if (netif_carrier_ok(nic->ndev)) 299 ret = aq_apply_secy_cfg(nic, ctx->secy); 300 301 return ret; 302 } 303 304 static int aq_mdo_dev_stop(struct macsec_context *ctx) 305 { 306 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 307 int i; 308 309 if (ctx->prepare) 310 return 0; 311 312 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 313 if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) 314 aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy, 315 AQ_CLEAR_HW); 316 } 317 318 return 0; 319 } 320 321 static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx) 322 { 323 struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx]; 324 struct aq_mss_egress_class_record tx_class_rec = { 0 }; 325 const struct macsec_secy *secy = aq_txsc->sw_secy; 326 struct aq_mss_egress_sc_record sc_rec = { 0 }; 327 unsigned int sc_idx = aq_txsc->hw_sc_idx; 328 struct aq_hw_s *hw = nic->aq_hw; 329 int ret = 0; 330 331 aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr); 332 333 put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci); 334 tx_class_rec.sci_mask = 0; 335 336 tx_class_rec.sa_mask = 0x3f; 337 338 tx_class_rec.action = 0; /* forward to SA/SC table */ 339 tx_class_rec.valid = 1; 340 341 tx_class_rec.sc_idx = sc_idx; 342 343 tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa; 344 345 ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx); 346 if (ret) 347 return ret; 348 349 sc_rec.protect = secy->protect_frames; 350 if (secy->tx_sc.encrypt) 351 sc_rec.tci |= BIT(1); 352 if (secy->tx_sc.scb) 353 sc_rec.tci |= BIT(2); 354 if (secy->tx_sc.send_sci) 355 sc_rec.tci |= BIT(3); 356 if (secy->tx_sc.end_station) 357 sc_rec.tci |= BIT(4); 358 /* The C bit is clear if and only if the Secure Data is 359 * exactly the same as the User Data and the ICV is 16 octets long. 360 */ 361 if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt)) 362 sc_rec.tci |= BIT(0); 363 364 sc_rec.an_roll = 0; 365 366 switch (secy->key_len) { 367 case AQ_MACSEC_KEY_LEN_128_BIT: 368 sc_rec.sak_len = 0; 369 break; 370 case AQ_MACSEC_KEY_LEN_192_BIT: 371 sc_rec.sak_len = 1; 372 break; 373 case AQ_MACSEC_KEY_LEN_256_BIT: 374 sc_rec.sak_len = 2; 375 break; 376 default: 377 WARN_ONCE(true, "Invalid sc_sa"); 378 return -EINVAL; 379 } 380 381 sc_rec.curr_an = secy->tx_sc.encoding_sa; 382 sc_rec.valid = 1; 383 sc_rec.fresh = 1; 384 385 return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx); 386 } 387 388 static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa) 389 { 390 u32 result = 0; 391 392 switch (sc_sa) { 393 case aq_macsec_sa_sc_4sa_8sc: 394 result = 8; 395 break; 396 case aq_macsec_sa_sc_2sa_16sc: 397 result = 16; 398 break; 399 case aq_macsec_sa_sc_1sa_32sc: 400 result = 32; 401 break; 402 default: 403 break; 404 }; 405 406 return result; 407 } 408 409 static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa) 410 { 411 switch (sc_sa) { 412 case aq_macsec_sa_sc_4sa_8sc: 413 return sc_idx << 2; 414 case aq_macsec_sa_sc_2sa_16sc: 415 return sc_idx << 1; 416 case aq_macsec_sa_sc_1sa_32sc: 417 return sc_idx; 418 default: 419 WARN_ONCE(true, "Invalid sc_sa"); 420 }; 421 422 return sc_idx; 423 } 424 425 static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an) 426 { 427 enum aq_macsec_sc_sa sc_sa = aq_macsec_sa_sc_not_used; 428 429 switch (num_an) { 430 case 4: 431 sc_sa = aq_macsec_sa_sc_4sa_8sc; 432 break; 433 case 2: 434 sc_sa = aq_macsec_sa_sc_2sa_16sc; 435 break; 436 case 1: 437 sc_sa = aq_macsec_sa_sc_1sa_32sc; 438 break; 439 default: 440 break; 441 } 442 443 return sc_sa; 444 } 445 446 static int aq_mdo_add_secy(struct macsec_context *ctx) 447 { 448 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 449 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 450 const struct macsec_secy *secy = ctx->secy; 451 enum aq_macsec_sc_sa sc_sa; 452 u32 txsc_idx; 453 int ret = 0; 454 455 if (secy->xpn) 456 return -EOPNOTSUPP; 457 458 sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN); 459 if (sc_sa == aq_macsec_sa_sc_not_used) 460 return -EINVAL; 461 462 if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa)) 463 return -ENOSPC; 464 465 txsc_idx = ffz(cfg->txsc_idx_busy); 466 if (txsc_idx == AQ_MACSEC_MAX_SC) 467 return -ENOSPC; 468 469 if (ctx->prepare) 470 return 0; 471 472 cfg->sc_sa = sc_sa; 473 cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa); 474 cfg->aq_txsc[txsc_idx].sw_secy = secy; 475 476 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 477 ret = aq_set_txsc(nic, txsc_idx); 478 479 set_bit(txsc_idx, &cfg->txsc_idx_busy); 480 481 return 0; 482 } 483 484 static int aq_mdo_upd_secy(struct macsec_context *ctx) 485 { 486 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 487 const struct macsec_secy *secy = ctx->secy; 488 int txsc_idx; 489 int ret = 0; 490 491 txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy); 492 if (txsc_idx < 0) 493 return -ENOENT; 494 495 if (ctx->prepare) 496 return 0; 497 498 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 499 ret = aq_set_txsc(nic, txsc_idx); 500 501 return ret; 502 } 503 504 static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx, 505 enum aq_clear_type clear_type) 506 { 507 struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx]; 508 struct aq_mss_egress_class_record tx_class_rec = { 0 }; 509 struct aq_mss_egress_sc_record sc_rec = { 0 }; 510 struct aq_hw_s *hw = nic->aq_hw; 511 int ret = 0; 512 int sa_num; 513 514 for_each_set_bit (sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) { 515 ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type); 516 if (ret) 517 return ret; 518 } 519 520 if (clear_type & AQ_CLEAR_HW) { 521 ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, 522 txsc_idx); 523 if (ret) 524 return ret; 525 526 sc_rec.fresh = 1; 527 ret = aq_mss_set_egress_sc_record(hw, &sc_rec, 528 tx_sc->hw_sc_idx); 529 if (ret) 530 return ret; 531 } 532 533 if (clear_type & AQ_CLEAR_SW) { 534 clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy); 535 nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL; 536 } 537 538 return ret; 539 } 540 541 static int aq_mdo_del_secy(struct macsec_context *ctx) 542 { 543 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 544 int ret = 0; 545 546 if (ctx->prepare) 547 return 0; 548 549 if (!nic->macsec_cfg) 550 return 0; 551 552 ret = aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL); 553 554 return ret; 555 } 556 557 static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx, 558 const struct macsec_secy *secy, 559 const struct macsec_tx_sa *tx_sa, 560 const unsigned char *key, const unsigned char an) 561 { 562 const u32 next_pn = tx_sa->next_pn_halves.lower; 563 struct aq_mss_egress_sakey_record key_rec; 564 const unsigned int sa_idx = sc_idx | an; 565 struct aq_mss_egress_sa_record sa_rec; 566 struct aq_hw_s *hw = nic->aq_hw; 567 int ret = 0; 568 569 memset(&sa_rec, 0, sizeof(sa_rec)); 570 sa_rec.valid = tx_sa->active; 571 sa_rec.fresh = 1; 572 sa_rec.next_pn = next_pn; 573 574 ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx); 575 if (ret) 576 return ret; 577 578 if (!key) 579 return ret; 580 581 memset(&key_rec, 0, sizeof(key_rec)); 582 memcpy(&key_rec.key, key, secy->key_len); 583 584 aq_rotate_keys(&key_rec.key, secy->key_len); 585 586 ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx); 587 588 return ret; 589 } 590 591 static int aq_mdo_add_txsa(struct macsec_context *ctx) 592 { 593 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 594 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 595 const struct macsec_secy *secy = ctx->secy; 596 struct aq_macsec_txsc *aq_txsc; 597 int txsc_idx; 598 int ret = 0; 599 600 txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy); 601 if (txsc_idx < 0) 602 return -EINVAL; 603 604 if (ctx->prepare) 605 return 0; 606 607 aq_txsc = &cfg->aq_txsc[txsc_idx]; 608 set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy); 609 610 memcpy(aq_txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key, 611 secy->key_len); 612 613 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 614 ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy, 615 ctx->sa.tx_sa, ctx->sa.key, 616 ctx->sa.assoc_num); 617 618 return ret; 619 } 620 621 static int aq_mdo_upd_txsa(struct macsec_context *ctx) 622 { 623 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 624 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 625 const struct macsec_secy *secy = ctx->secy; 626 struct aq_macsec_txsc *aq_txsc; 627 int txsc_idx; 628 int ret = 0; 629 630 txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy); 631 if (txsc_idx < 0) 632 return -EINVAL; 633 634 if (ctx->prepare) 635 return 0; 636 637 aq_txsc = &cfg->aq_txsc[txsc_idx]; 638 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 639 ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy, 640 ctx->sa.tx_sa, NULL, ctx->sa.assoc_num); 641 642 return ret; 643 } 644 645 static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc, 646 const int sa_num, enum aq_clear_type clear_type) 647 { 648 const int sa_idx = aq_txsc->hw_sc_idx | sa_num; 649 struct aq_hw_s *hw = nic->aq_hw; 650 int ret = 0; 651 652 if (clear_type & AQ_CLEAR_SW) 653 clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy); 654 655 if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) { 656 struct aq_mss_egress_sakey_record key_rec; 657 struct aq_mss_egress_sa_record sa_rec; 658 659 memset(&sa_rec, 0, sizeof(sa_rec)); 660 sa_rec.fresh = 1; 661 662 ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx); 663 if (ret) 664 return ret; 665 666 memset(&key_rec, 0, sizeof(key_rec)); 667 return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx); 668 } 669 670 return 0; 671 } 672 673 static int aq_mdo_del_txsa(struct macsec_context *ctx) 674 { 675 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 676 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 677 int txsc_idx; 678 int ret = 0; 679 680 txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy); 681 if (txsc_idx < 0) 682 return -EINVAL; 683 684 if (ctx->prepare) 685 return 0; 686 687 ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num, 688 AQ_CLEAR_ALL); 689 690 return ret; 691 } 692 693 static int aq_rxsc_validate_frames(const enum macsec_validation_type validate) 694 { 695 switch (validate) { 696 case MACSEC_VALIDATE_DISABLED: 697 return 2; 698 case MACSEC_VALIDATE_CHECK: 699 return 1; 700 case MACSEC_VALIDATE_STRICT: 701 return 0; 702 default: 703 WARN_ONCE(true, "Invalid validation type"); 704 } 705 706 return 0; 707 } 708 709 static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx) 710 { 711 const struct aq_macsec_rxsc *aq_rxsc = 712 &nic->macsec_cfg->aq_rxsc[rxsc_idx]; 713 struct aq_mss_ingress_preclass_record pre_class_record; 714 const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc; 715 const struct macsec_secy *secy = aq_rxsc->sw_secy; 716 const u32 hw_sc_idx = aq_rxsc->hw_sc_idx; 717 struct aq_mss_ingress_sc_record sc_record; 718 struct aq_hw_s *hw = nic->aq_hw; 719 int ret = 0; 720 721 memset(&pre_class_record, 0, sizeof(pre_class_record)); 722 put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci); 723 pre_class_record.sci_mask = 0xff; 724 /* match all MACSEC ethertype packets */ 725 pre_class_record.eth_type = ETH_P_MACSEC; 726 pre_class_record.eth_type_mask = 0x3; 727 728 aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci); 729 pre_class_record.sa_mask = 0x3f; 730 731 pre_class_record.an_mask = nic->macsec_cfg->sc_sa; 732 pre_class_record.sc_idx = hw_sc_idx; 733 /* strip SecTAG & forward for decryption */ 734 pre_class_record.action = 0x0; 735 pre_class_record.valid = 1; 736 737 ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record, 738 2 * rxsc_idx + 1); 739 if (ret) 740 return ret; 741 742 /* If SCI is absent, then match by SA alone */ 743 pre_class_record.sci_mask = 0; 744 pre_class_record.sci_from_table = 1; 745 746 ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record, 747 2 * rxsc_idx); 748 if (ret) 749 return ret; 750 751 memset(&sc_record, 0, sizeof(sc_record)); 752 sc_record.validate_frames = 753 aq_rxsc_validate_frames(secy->validate_frames); 754 if (secy->replay_protect) { 755 sc_record.replay_protect = 1; 756 sc_record.anti_replay_window = secy->replay_window; 757 } 758 sc_record.valid = 1; 759 sc_record.fresh = 1; 760 761 ret = aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx); 762 if (ret) 763 return ret; 764 765 return ret; 766 } 767 768 static int aq_mdo_add_rxsc(struct macsec_context *ctx) 769 { 770 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 771 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 772 const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa); 773 u32 rxsc_idx; 774 int ret = 0; 775 776 if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max) 777 return -ENOSPC; 778 779 rxsc_idx = ffz(cfg->rxsc_idx_busy); 780 if (rxsc_idx >= rxsc_idx_max) 781 return -ENOSPC; 782 783 if (ctx->prepare) 784 return 0; 785 786 cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx, 787 cfg->sc_sa); 788 cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy; 789 cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc; 790 791 if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev)) 792 ret = aq_set_rxsc(nic, rxsc_idx); 793 794 if (ret < 0) 795 return ret; 796 797 set_bit(rxsc_idx, &cfg->rxsc_idx_busy); 798 799 return 0; 800 } 801 802 static int aq_mdo_upd_rxsc(struct macsec_context *ctx) 803 { 804 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 805 int rxsc_idx; 806 int ret = 0; 807 808 rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc); 809 if (rxsc_idx < 0) 810 return -ENOENT; 811 812 if (ctx->prepare) 813 return 0; 814 815 if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev)) 816 ret = aq_set_rxsc(nic, rxsc_idx); 817 818 return ret; 819 } 820 821 static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx, 822 enum aq_clear_type clear_type) 823 { 824 struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx]; 825 struct aq_hw_s *hw = nic->aq_hw; 826 int ret = 0; 827 int sa_num; 828 829 for_each_set_bit (sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) { 830 ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type); 831 if (ret) 832 return ret; 833 } 834 835 if (clear_type & AQ_CLEAR_HW) { 836 struct aq_mss_ingress_preclass_record pre_class_record; 837 struct aq_mss_ingress_sc_record sc_record; 838 839 memset(&pre_class_record, 0, sizeof(pre_class_record)); 840 memset(&sc_record, 0, sizeof(sc_record)); 841 842 ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record, 843 2 * rxsc_idx); 844 if (ret) 845 return ret; 846 847 ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record, 848 2 * rxsc_idx + 1); 849 if (ret) 850 return ret; 851 852 sc_record.fresh = 1; 853 ret = aq_mss_set_ingress_sc_record(hw, &sc_record, 854 rx_sc->hw_sc_idx); 855 if (ret) 856 return ret; 857 } 858 859 if (clear_type & AQ_CLEAR_SW) { 860 clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy); 861 rx_sc->sw_secy = NULL; 862 rx_sc->sw_rxsc = NULL; 863 } 864 865 return ret; 866 } 867 868 static int aq_mdo_del_rxsc(struct macsec_context *ctx) 869 { 870 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 871 enum aq_clear_type clear_type = AQ_CLEAR_SW; 872 int rxsc_idx; 873 int ret = 0; 874 875 rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc); 876 if (rxsc_idx < 0) 877 return -ENOENT; 878 879 if (ctx->prepare) 880 return 0; 881 882 if (netif_carrier_ok(nic->ndev)) 883 clear_type = AQ_CLEAR_ALL; 884 885 ret = aq_clear_rxsc(nic, rxsc_idx, clear_type); 886 887 return ret; 888 } 889 890 static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx, 891 const struct macsec_secy *secy, 892 const struct macsec_rx_sa *rx_sa, 893 const unsigned char *key, const unsigned char an) 894 { 895 struct aq_mss_ingress_sakey_record sa_key_record; 896 const u32 next_pn = rx_sa->next_pn_halves.lower; 897 struct aq_mss_ingress_sa_record sa_record; 898 struct aq_hw_s *hw = nic->aq_hw; 899 const int sa_idx = sc_idx | an; 900 int ret = 0; 901 902 memset(&sa_record, 0, sizeof(sa_record)); 903 sa_record.valid = rx_sa->active; 904 sa_record.fresh = 1; 905 sa_record.next_pn = next_pn; 906 907 ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx); 908 if (ret) 909 return ret; 910 911 if (!key) 912 return ret; 913 914 memset(&sa_key_record, 0, sizeof(sa_key_record)); 915 memcpy(&sa_key_record.key, key, secy->key_len); 916 917 switch (secy->key_len) { 918 case AQ_MACSEC_KEY_LEN_128_BIT: 919 sa_key_record.key_len = 0; 920 break; 921 case AQ_MACSEC_KEY_LEN_192_BIT: 922 sa_key_record.key_len = 1; 923 break; 924 case AQ_MACSEC_KEY_LEN_256_BIT: 925 sa_key_record.key_len = 2; 926 break; 927 default: 928 return -1; 929 } 930 931 aq_rotate_keys(&sa_key_record.key, secy->key_len); 932 933 ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx); 934 935 return ret; 936 } 937 938 static int aq_mdo_add_rxsa(struct macsec_context *ctx) 939 { 940 const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc; 941 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 942 const struct macsec_secy *secy = ctx->secy; 943 struct aq_macsec_rxsc *aq_rxsc; 944 int rxsc_idx; 945 int ret = 0; 946 947 rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc); 948 if (rxsc_idx < 0) 949 return -EINVAL; 950 951 if (ctx->prepare) 952 return 0; 953 954 aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx]; 955 set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy); 956 957 memcpy(aq_rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key, 958 secy->key_len); 959 960 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 961 ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy, 962 ctx->sa.rx_sa, ctx->sa.key, 963 ctx->sa.assoc_num); 964 965 return ret; 966 } 967 968 static int aq_mdo_upd_rxsa(struct macsec_context *ctx) 969 { 970 const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc; 971 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 972 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 973 const struct macsec_secy *secy = ctx->secy; 974 int rxsc_idx; 975 int ret = 0; 976 977 rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc); 978 if (rxsc_idx < 0) 979 return -EINVAL; 980 981 if (ctx->prepare) 982 return 0; 983 984 if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev)) 985 ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx, 986 secy, ctx->sa.rx_sa, NULL, 987 ctx->sa.assoc_num); 988 989 return ret; 990 } 991 992 static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc, 993 const int sa_num, enum aq_clear_type clear_type) 994 { 995 int sa_idx = aq_rxsc->hw_sc_idx | sa_num; 996 struct aq_hw_s *hw = nic->aq_hw; 997 int ret = 0; 998 999 if (clear_type & AQ_CLEAR_SW) 1000 clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy); 1001 1002 if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) { 1003 struct aq_mss_ingress_sakey_record sa_key_record; 1004 struct aq_mss_ingress_sa_record sa_record; 1005 1006 memset(&sa_key_record, 0, sizeof(sa_key_record)); 1007 memset(&sa_record, 0, sizeof(sa_record)); 1008 sa_record.fresh = 1; 1009 ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx); 1010 if (ret) 1011 return ret; 1012 1013 return aq_mss_set_ingress_sakey_record(hw, &sa_key_record, 1014 sa_idx); 1015 } 1016 1017 return ret; 1018 } 1019 1020 static int aq_mdo_del_rxsa(struct macsec_context *ctx) 1021 { 1022 const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc; 1023 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1024 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1025 int rxsc_idx; 1026 int ret = 0; 1027 1028 rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc); 1029 if (rxsc_idx < 0) 1030 return -EINVAL; 1031 1032 if (ctx->prepare) 1033 return 0; 1034 1035 ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num, 1036 AQ_CLEAR_ALL); 1037 1038 return ret; 1039 } 1040 1041 static int aq_mdo_get_dev_stats(struct macsec_context *ctx) 1042 { 1043 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1044 struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats; 1045 struct aq_hw_s *hw = nic->aq_hw; 1046 1047 if (ctx->prepare) 1048 return 0; 1049 1050 aq_get_macsec_common_stats(hw, stats); 1051 1052 ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts; 1053 ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts; 1054 ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long; 1055 ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts; 1056 ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts; 1057 ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts; 1058 ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts; 1059 ctx->stats.dev_stats->InPktsOverrun = 0; 1060 1061 return 0; 1062 } 1063 1064 static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx) 1065 { 1066 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1067 struct aq_macsec_tx_sc_stats *stats; 1068 struct aq_hw_s *hw = nic->aq_hw; 1069 struct aq_macsec_txsc *aq_txsc; 1070 int txsc_idx; 1071 1072 txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy); 1073 if (txsc_idx < 0) 1074 return -ENOENT; 1075 1076 if (ctx->prepare) 1077 return 0; 1078 1079 aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx]; 1080 stats = &aq_txsc->stats; 1081 aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats); 1082 1083 ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts; 1084 ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts; 1085 ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets; 1086 ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets; 1087 1088 return 0; 1089 } 1090 1091 static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx) 1092 { 1093 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1094 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1095 struct aq_macsec_tx_sa_stats *stats; 1096 struct aq_hw_s *hw = nic->aq_hw; 1097 const struct macsec_secy *secy; 1098 struct aq_macsec_txsc *aq_txsc; 1099 struct macsec_tx_sa *tx_sa; 1100 unsigned int sa_idx; 1101 int txsc_idx; 1102 u32 next_pn; 1103 int ret; 1104 1105 txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy); 1106 if (txsc_idx < 0) 1107 return -EINVAL; 1108 1109 if (ctx->prepare) 1110 return 0; 1111 1112 aq_txsc = &cfg->aq_txsc[txsc_idx]; 1113 sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num; 1114 stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num]; 1115 ret = aq_get_txsa_stats(hw, sa_idx, stats); 1116 if (ret) 1117 return ret; 1118 1119 ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts; 1120 ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts; 1121 1122 secy = aq_txsc->sw_secy; 1123 tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]); 1124 ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn); 1125 if (ret == 0) { 1126 spin_lock_bh(&tx_sa->lock); 1127 tx_sa->next_pn = next_pn; 1128 spin_unlock_bh(&tx_sa->lock); 1129 } 1130 1131 return ret; 1132 } 1133 1134 static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx) 1135 { 1136 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1137 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1138 struct aq_macsec_rx_sa_stats *stats; 1139 struct aq_hw_s *hw = nic->aq_hw; 1140 struct aq_macsec_rxsc *aq_rxsc; 1141 unsigned int sa_idx; 1142 int rxsc_idx; 1143 int ret = 0; 1144 int i; 1145 1146 rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc); 1147 if (rxsc_idx < 0) 1148 return -ENOENT; 1149 1150 if (ctx->prepare) 1151 return 0; 1152 1153 aq_rxsc = &cfg->aq_rxsc[rxsc_idx]; 1154 for (i = 0; i < MACSEC_NUM_AN; i++) { 1155 if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy)) 1156 continue; 1157 1158 stats = &aq_rxsc->rx_sa_stats[i]; 1159 sa_idx = aq_rxsc->hw_sc_idx | i; 1160 ret = aq_get_rxsa_stats(hw, sa_idx, stats); 1161 if (ret) 1162 break; 1163 1164 ctx->stats.rx_sc_stats->InOctetsValidated += 1165 stats->validated_octets; 1166 ctx->stats.rx_sc_stats->InOctetsDecrypted += 1167 stats->decrypted_octets; 1168 ctx->stats.rx_sc_stats->InPktsUnchecked += 1169 stats->unchecked_pkts; 1170 ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts; 1171 ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts; 1172 ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts; 1173 ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts; 1174 ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts; 1175 ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa; 1176 ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa; 1177 } 1178 1179 return ret; 1180 } 1181 1182 static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx) 1183 { 1184 struct aq_nic_s *nic = netdev_priv(ctx->netdev); 1185 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1186 struct aq_macsec_rx_sa_stats *stats; 1187 struct aq_hw_s *hw = nic->aq_hw; 1188 struct aq_macsec_rxsc *aq_rxsc; 1189 struct macsec_rx_sa *rx_sa; 1190 unsigned int sa_idx; 1191 int rxsc_idx; 1192 u32 next_pn; 1193 int ret; 1194 1195 rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc); 1196 if (rxsc_idx < 0) 1197 return -EINVAL; 1198 1199 if (ctx->prepare) 1200 return 0; 1201 1202 aq_rxsc = &cfg->aq_rxsc[rxsc_idx]; 1203 stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num]; 1204 sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num; 1205 ret = aq_get_rxsa_stats(hw, sa_idx, stats); 1206 if (ret) 1207 return ret; 1208 1209 ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts; 1210 ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts; 1211 ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts; 1212 ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa; 1213 ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa; 1214 1215 rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]); 1216 ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn); 1217 if (ret == 0) { 1218 spin_lock_bh(&rx_sa->lock); 1219 rx_sa->next_pn = next_pn; 1220 spin_unlock_bh(&rx_sa->lock); 1221 } 1222 1223 return ret; 1224 } 1225 1226 static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx) 1227 { 1228 struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx]; 1229 const struct macsec_secy *secy = aq_txsc->sw_secy; 1230 struct macsec_tx_sa *tx_sa; 1231 int ret = 0; 1232 int i; 1233 1234 if (!netif_running(secy->netdev)) 1235 return ret; 1236 1237 ret = aq_set_txsc(nic, txsc_idx); 1238 if (ret) 1239 return ret; 1240 1241 for (i = 0; i < MACSEC_NUM_AN; i++) { 1242 tx_sa = rcu_dereference_bh(secy->tx_sc.sa[i]); 1243 if (tx_sa) { 1244 ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy, 1245 tx_sa, aq_txsc->tx_sa_key[i], i); 1246 if (ret) 1247 return ret; 1248 } 1249 } 1250 1251 return ret; 1252 } 1253 1254 static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx) 1255 { 1256 struct aq_macsec_rxsc *aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx]; 1257 const struct macsec_secy *secy = aq_rxsc->sw_secy; 1258 struct macsec_rx_sa *rx_sa; 1259 int ret = 0; 1260 int i; 1261 1262 if (!netif_running(secy->netdev)) 1263 return ret; 1264 1265 ret = aq_set_rxsc(nic, rxsc_idx); 1266 if (ret) 1267 return ret; 1268 1269 for (i = 0; i < MACSEC_NUM_AN; i++) { 1270 rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[i]); 1271 if (rx_sa) { 1272 ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy, 1273 rx_sa, aq_rxsc->rx_sa_key[i], i); 1274 if (ret) 1275 return ret; 1276 } 1277 } 1278 1279 return ret; 1280 } 1281 1282 static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy, 1283 enum aq_clear_type clear_type) 1284 { 1285 struct macsec_rx_sc *rx_sc; 1286 int txsc_idx; 1287 int rxsc_idx; 1288 int ret = 0; 1289 1290 txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy); 1291 if (txsc_idx >= 0) { 1292 ret = aq_clear_txsc(nic, txsc_idx, clear_type); 1293 if (ret) 1294 return ret; 1295 } 1296 1297 for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc; 1298 rx_sc = rcu_dereference_bh(rx_sc->next)) { 1299 rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc); 1300 if (rxsc_idx < 0) 1301 continue; 1302 1303 ret = aq_clear_rxsc(nic, rxsc_idx, clear_type); 1304 if (ret) 1305 return ret; 1306 } 1307 1308 return ret; 1309 } 1310 1311 static int aq_apply_secy_cfg(struct aq_nic_s *nic, 1312 const struct macsec_secy *secy) 1313 { 1314 struct macsec_rx_sc *rx_sc; 1315 int txsc_idx; 1316 int rxsc_idx; 1317 int ret = 0; 1318 1319 txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy); 1320 if (txsc_idx >= 0) 1321 apply_txsc_cfg(nic, txsc_idx); 1322 1323 for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active; 1324 rx_sc = rcu_dereference_bh(rx_sc->next)) { 1325 rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc); 1326 if (unlikely(rxsc_idx < 0)) 1327 continue; 1328 1329 ret = apply_rxsc_cfg(nic, rxsc_idx); 1330 if (ret) 1331 return ret; 1332 } 1333 1334 return ret; 1335 } 1336 1337 static int aq_apply_macsec_cfg(struct aq_nic_s *nic) 1338 { 1339 int ret = 0; 1340 int i; 1341 1342 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1343 if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) { 1344 ret = apply_txsc_cfg(nic, i); 1345 if (ret) 1346 return ret; 1347 } 1348 } 1349 1350 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1351 if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) { 1352 ret = apply_rxsc_cfg(nic, i); 1353 if (ret) 1354 return ret; 1355 } 1356 } 1357 1358 return ret; 1359 } 1360 1361 static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx) 1362 { 1363 switch (sc_sa) { 1364 case aq_macsec_sa_sc_4sa_8sc: 1365 return sa_idx & 3; 1366 case aq_macsec_sa_sc_2sa_16sc: 1367 return sa_idx & 1; 1368 case aq_macsec_sa_sc_1sa_32sc: 1369 return 0; 1370 default: 1371 WARN_ONCE(true, "Invalid sc_sa"); 1372 } 1373 return -EINVAL; 1374 } 1375 1376 static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, 1377 const int sa_idx) 1378 { 1379 switch (sc_sa) { 1380 case aq_macsec_sa_sc_4sa_8sc: 1381 return sa_idx & ~3; 1382 case aq_macsec_sa_sc_2sa_16sc: 1383 return sa_idx & ~1; 1384 case aq_macsec_sa_sc_1sa_32sc: 1385 return sa_idx; 1386 default: 1387 WARN_ONCE(true, "Invalid sc_sa"); 1388 } 1389 return -EINVAL; 1390 } 1391 1392 static void aq_check_txsa_expiration(struct aq_nic_s *nic) 1393 { 1394 u32 egress_sa_expired, egress_sa_threshold_expired; 1395 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1396 struct aq_hw_s *hw = nic->aq_hw; 1397 struct aq_macsec_txsc *aq_txsc; 1398 const struct macsec_secy *secy; 1399 int sc_idx = 0, txsc_idx = 0; 1400 enum aq_macsec_sc_sa sc_sa; 1401 struct macsec_tx_sa *tx_sa; 1402 unsigned char an = 0; 1403 int ret; 1404 int i; 1405 1406 sc_sa = cfg->sc_sa; 1407 1408 ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired); 1409 if (unlikely(ret)) 1410 return; 1411 1412 ret = aq_mss_get_egress_sa_threshold_expired(hw, 1413 &egress_sa_threshold_expired); 1414 1415 for (i = 0; i < AQ_MACSEC_MAX_SA; i++) { 1416 if (egress_sa_expired & BIT(i)) { 1417 an = aq_sa_from_sa_idx(sc_sa, i); 1418 sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i); 1419 txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx); 1420 if (txsc_idx < 0) 1421 continue; 1422 1423 aq_txsc = &cfg->aq_txsc[txsc_idx]; 1424 if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) { 1425 netdev_warn(nic->ndev, 1426 "PN threshold expired on invalid TX SC"); 1427 continue; 1428 } 1429 1430 secy = aq_txsc->sw_secy; 1431 if (!netif_running(secy->netdev)) { 1432 netdev_warn(nic->ndev, 1433 "PN threshold expired on down TX SC"); 1434 continue; 1435 } 1436 1437 if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) { 1438 netdev_warn(nic->ndev, 1439 "PN threshold expired on invalid TX SA"); 1440 continue; 1441 } 1442 1443 tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]); 1444 macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa); 1445 } 1446 } 1447 1448 aq_mss_set_egress_sa_expired(hw, egress_sa_expired); 1449 if (likely(!ret)) 1450 aq_mss_set_egress_sa_threshold_expired(hw, 1451 egress_sa_threshold_expired); 1452 } 1453 1454 const struct macsec_ops aq_macsec_ops = { 1455 .mdo_dev_open = aq_mdo_dev_open, 1456 .mdo_dev_stop = aq_mdo_dev_stop, 1457 .mdo_add_secy = aq_mdo_add_secy, 1458 .mdo_upd_secy = aq_mdo_upd_secy, 1459 .mdo_del_secy = aq_mdo_del_secy, 1460 .mdo_add_rxsc = aq_mdo_add_rxsc, 1461 .mdo_upd_rxsc = aq_mdo_upd_rxsc, 1462 .mdo_del_rxsc = aq_mdo_del_rxsc, 1463 .mdo_add_rxsa = aq_mdo_add_rxsa, 1464 .mdo_upd_rxsa = aq_mdo_upd_rxsa, 1465 .mdo_del_rxsa = aq_mdo_del_rxsa, 1466 .mdo_add_txsa = aq_mdo_add_txsa, 1467 .mdo_upd_txsa = aq_mdo_upd_txsa, 1468 .mdo_del_txsa = aq_mdo_del_txsa, 1469 .mdo_get_dev_stats = aq_mdo_get_dev_stats, 1470 .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats, 1471 .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats, 1472 .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats, 1473 .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats, 1474 }; 1475 1476 int aq_macsec_init(struct aq_nic_s *nic) 1477 { 1478 struct aq_macsec_cfg *cfg; 1479 u32 caps_lo; 1480 1481 if (!nic->aq_fw_ops->get_link_capabilities) 1482 return 0; 1483 1484 caps_lo = nic->aq_fw_ops->get_link_capabilities(nic->aq_hw); 1485 1486 if (!(caps_lo & BIT(CAPS_LO_MACSEC))) 1487 return 0; 1488 1489 nic->macsec_cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1490 if (!nic->macsec_cfg) 1491 return -ENOMEM; 1492 1493 nic->ndev->features |= NETIF_F_HW_MACSEC; 1494 nic->ndev->macsec_ops = &aq_macsec_ops; 1495 1496 return 0; 1497 } 1498 1499 void aq_macsec_free(struct aq_nic_s *nic) 1500 { 1501 kfree(nic->macsec_cfg); 1502 nic->macsec_cfg = NULL; 1503 } 1504 1505 int aq_macsec_enable(struct aq_nic_s *nic) 1506 { 1507 u32 ctl_ether_types[1] = { ETH_P_PAE }; 1508 struct macsec_msg_fw_response resp = { 0 }; 1509 struct macsec_msg_fw_request msg = { 0 }; 1510 struct aq_hw_s *hw = nic->aq_hw; 1511 int num_ctl_ether_types = 0; 1512 int index = 0, tbl_idx; 1513 int ret; 1514 1515 if (!nic->macsec_cfg) 1516 return 0; 1517 1518 rtnl_lock(); 1519 1520 if (nic->aq_fw_ops->send_macsec_req) { 1521 struct macsec_cfg_request cfg = { 0 }; 1522 1523 cfg.enabled = 1; 1524 cfg.egress_threshold = 0xffffffff; 1525 cfg.ingress_threshold = 0xffffffff; 1526 cfg.interrupts_enabled = 1; 1527 1528 msg.msg_type = macsec_cfg_msg; 1529 msg.cfg = cfg; 1530 1531 ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp); 1532 if (ret) 1533 goto unlock; 1534 } 1535 1536 /* Init Ethertype bypass filters */ 1537 for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) { 1538 struct aq_mss_ingress_prectlf_record rx_prectlf_rec; 1539 struct aq_mss_egress_ctlf_record tx_ctlf_rec; 1540 1541 if (ctl_ether_types[index] == 0) 1542 continue; 1543 1544 memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec)); 1545 tx_ctlf_rec.eth_type = ctl_ether_types[index]; 1546 tx_ctlf_rec.match_type = 4; /* Match eth_type only */ 1547 tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */ 1548 tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */ 1549 tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1; 1550 aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx); 1551 1552 memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec)); 1553 rx_prectlf_rec.eth_type = ctl_ether_types[index]; 1554 rx_prectlf_rec.match_type = 4; /* Match eth_type only */ 1555 rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */ 1556 rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */ 1557 tbl_idx = 1558 NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1; 1559 aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx); 1560 1561 num_ctl_ether_types++; 1562 } 1563 1564 ret = aq_apply_macsec_cfg(nic); 1565 1566 unlock: 1567 rtnl_unlock(); 1568 return ret; 1569 } 1570 1571 void aq_macsec_work(struct aq_nic_s *nic) 1572 { 1573 if (!nic->macsec_cfg) 1574 return; 1575 1576 if (!netif_carrier_ok(nic->ndev)) 1577 return; 1578 1579 rtnl_lock(); 1580 aq_check_txsa_expiration(nic); 1581 rtnl_unlock(); 1582 } 1583 1584 int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) 1585 { 1586 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1587 int i, cnt = 0; 1588 1589 if (!cfg) 1590 return 0; 1591 1592 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1593 if (!test_bit(i, &cfg->rxsc_idx_busy)) 1594 continue; 1595 cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy); 1596 } 1597 1598 return cnt; 1599 } 1600 1601 int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic) 1602 { 1603 if (!nic->macsec_cfg) 1604 return 0; 1605 1606 return hweight_long(nic->macsec_cfg->txsc_idx_busy); 1607 } 1608 1609 int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) 1610 { 1611 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1612 int i, cnt = 0; 1613 1614 if (!cfg) 1615 return 0; 1616 1617 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1618 if (!test_bit(i, &cfg->txsc_idx_busy)) 1619 continue; 1620 cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy); 1621 } 1622 1623 return cnt; 1624 } 1625 1626 static int aq_macsec_update_stats(struct aq_nic_s *nic) 1627 { 1628 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1629 struct aq_hw_s *hw = nic->aq_hw; 1630 struct aq_macsec_txsc *aq_txsc; 1631 struct aq_macsec_rxsc *aq_rxsc; 1632 int i, sa_idx, assoc_num; 1633 int ret = 0; 1634 1635 aq_get_macsec_common_stats(hw, &cfg->stats); 1636 1637 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1638 if (!(cfg->txsc_idx_busy & BIT(i))) 1639 continue; 1640 aq_txsc = &cfg->aq_txsc[i]; 1641 1642 ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, 1643 &aq_txsc->stats); 1644 if (ret) 1645 return ret; 1646 1647 for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) { 1648 if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy)) 1649 continue; 1650 sa_idx = aq_txsc->hw_sc_idx | assoc_num; 1651 ret = aq_get_txsa_stats(hw, sa_idx, 1652 &aq_txsc->tx_sa_stats[assoc_num]); 1653 if (ret) 1654 return ret; 1655 } 1656 } 1657 1658 for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { 1659 if (!(test_bit(i, &cfg->rxsc_idx_busy))) 1660 continue; 1661 aq_rxsc = &cfg->aq_rxsc[i]; 1662 1663 for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) { 1664 if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy)) 1665 continue; 1666 sa_idx = aq_rxsc->hw_sc_idx | assoc_num; 1667 1668 ret = aq_get_rxsa_stats(hw, sa_idx, 1669 &aq_rxsc->rx_sa_stats[assoc_num]); 1670 if (ret) 1671 return ret; 1672 } 1673 } 1674 1675 return ret; 1676 } 1677 1678 u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data) 1679 { 1680 struct aq_macsec_cfg *cfg = nic->macsec_cfg; 1681 struct aq_macsec_common_stats *common_stats; 1682 struct aq_macsec_tx_sc_stats *txsc_stats; 1683 struct aq_macsec_tx_sa_stats *txsa_stats; 1684 struct aq_macsec_rx_sa_stats *rxsa_stats; 1685 struct aq_macsec_txsc *aq_txsc; 1686 struct aq_macsec_rxsc *aq_rxsc; 1687 unsigned int assoc_num; 1688 unsigned int sc_num; 1689 unsigned int i = 0U; 1690 1691 if (!cfg) 1692 return data; 1693 1694 aq_macsec_update_stats(nic); 1695 1696 common_stats = &cfg->stats; 1697 data[i] = common_stats->in.ctl_pkts; 1698 data[++i] = common_stats->in.tagged_miss_pkts; 1699 data[++i] = common_stats->in.untagged_miss_pkts; 1700 data[++i] = common_stats->in.notag_pkts; 1701 data[++i] = common_stats->in.untagged_pkts; 1702 data[++i] = common_stats->in.bad_tag_pkts; 1703 data[++i] = common_stats->in.no_sci_pkts; 1704 data[++i] = common_stats->in.unknown_sci_pkts; 1705 data[++i] = common_stats->in.ctrl_prt_pass_pkts; 1706 data[++i] = common_stats->in.unctrl_prt_pass_pkts; 1707 data[++i] = common_stats->in.ctrl_prt_fail_pkts; 1708 data[++i] = common_stats->in.unctrl_prt_fail_pkts; 1709 data[++i] = common_stats->in.too_long_pkts; 1710 data[++i] = common_stats->in.igpoc_ctl_pkts; 1711 data[++i] = common_stats->in.ecc_error_pkts; 1712 data[++i] = common_stats->in.unctrl_hit_drop_redir; 1713 data[++i] = common_stats->out.ctl_pkts; 1714 data[++i] = common_stats->out.unknown_sa_pkts; 1715 data[++i] = common_stats->out.untagged_pkts; 1716 data[++i] = common_stats->out.too_long; 1717 data[++i] = common_stats->out.ecc_error_pkts; 1718 data[++i] = common_stats->out.unctrl_hit_drop_redir; 1719 1720 for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) { 1721 if (!(test_bit(sc_num, &cfg->txsc_idx_busy))) 1722 continue; 1723 1724 aq_txsc = &cfg->aq_txsc[sc_num]; 1725 txsc_stats = &aq_txsc->stats; 1726 1727 data[++i] = txsc_stats->sc_protected_pkts; 1728 data[++i] = txsc_stats->sc_encrypted_pkts; 1729 data[++i] = txsc_stats->sc_protected_octets; 1730 data[++i] = txsc_stats->sc_encrypted_octets; 1731 1732 for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) { 1733 if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy)) 1734 continue; 1735 1736 txsa_stats = &aq_txsc->tx_sa_stats[assoc_num]; 1737 1738 data[++i] = txsa_stats->sa_hit_drop_redirect; 1739 data[++i] = txsa_stats->sa_protected2_pkts; 1740 data[++i] = txsa_stats->sa_protected_pkts; 1741 data[++i] = txsa_stats->sa_encrypted_pkts; 1742 } 1743 } 1744 1745 for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) { 1746 if (!(test_bit(sc_num, &cfg->rxsc_idx_busy))) 1747 continue; 1748 1749 aq_rxsc = &cfg->aq_rxsc[sc_num]; 1750 1751 for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) { 1752 if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy)) 1753 continue; 1754 1755 rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num]; 1756 1757 data[++i] = rxsa_stats->untagged_hit_pkts; 1758 data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts; 1759 data[++i] = rxsa_stats->not_using_sa; 1760 data[++i] = rxsa_stats->unused_sa; 1761 data[++i] = rxsa_stats->not_valid_pkts; 1762 data[++i] = rxsa_stats->invalid_pkts; 1763 data[++i] = rxsa_stats->ok_pkts; 1764 data[++i] = rxsa_stats->late_pkts; 1765 data[++i] = rxsa_stats->delayed_pkts; 1766 data[++i] = rxsa_stats->unchecked_pkts; 1767 data[++i] = rxsa_stats->validated_octets; 1768 data[++i] = rxsa_stats->decrypted_octets; 1769 } 1770 } 1771 1772 i++; 1773 1774 data += i; 1775 1776 return data; 1777 } 1778