1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/interrupt.h> 13 #include <linux/delay.h> 14 #include <linux/irq.h> 15 #include <linux/pci.h> 16 #include <linux/sysfs.h> 17 18 #include "cgx.h" 19 #include "rvu.h" 20 #include "rvu_reg.h" 21 #include "ptp.h" 22 23 #define DRV_NAME "octeontx2-af" 24 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" 25 26 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); 27 28 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 29 struct rvu_block *block, int lf); 30 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 31 struct rvu_block *block, int lf); 32 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); 33 34 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 35 int type, int num, 36 void (mbox_handler)(struct work_struct *), 37 void (mbox_up_handler)(struct work_struct *)); 38 enum { 39 TYPE_AFVF, 40 TYPE_AFPF, 41 }; 42 43 /* Supported devices */ 44 static const struct pci_device_id rvu_id_table[] = { 45 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, 46 { 0, } /* end of table */ 47 }; 48 49 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); 50 MODULE_DESCRIPTION(DRV_STRING); 51 MODULE_LICENSE("GPL v2"); 52 MODULE_DEVICE_TABLE(pci, rvu_id_table); 53 54 static char *mkex_profile; /* MKEX profile name */ 55 module_param(mkex_profile, charp, 0000); 56 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); 57 58 static void rvu_setup_hw_capabilities(struct rvu *rvu) 59 { 60 struct rvu_hwinfo *hw = rvu->hw; 61 62 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; 63 hw->cap.nix_fixed_txschq_mapping = false; 64 hw->cap.nix_shaping = true; 65 hw->cap.nix_tx_link_bp = true; 66 hw->cap.nix_rx_multicast = true; 67 68 if (is_rvu_96xx_B0(rvu)) { 69 hw->cap.nix_fixed_txschq_mapping = true; 70 hw->cap.nix_txsch_per_cgx_lmac = 4; 71 hw->cap.nix_txsch_per_lbk_lmac = 132; 72 hw->cap.nix_txsch_per_sdp_lmac = 76; 73 hw->cap.nix_shaping = false; 74 hw->cap.nix_tx_link_bp = false; 75 if (is_rvu_96xx_A0(rvu)) 76 hw->cap.nix_rx_multicast = false; 77 } 78 } 79 80 /* Poll a RVU block's register 'offset', for a 'zero' 81 * or 'nonzero' at bits specified by 'mask' 82 */ 83 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) 84 { 85 unsigned long timeout = jiffies + usecs_to_jiffies(10000); 86 void __iomem *reg; 87 u64 reg_val; 88 89 reg = rvu->afreg_base + ((block << 28) | offset); 90 again: 91 reg_val = readq(reg); 92 if (zero && !(reg_val & mask)) 93 return 0; 94 if (!zero && (reg_val & mask)) 95 return 0; 96 if (time_before(jiffies, timeout)) { 97 usleep_range(1, 5); 98 goto again; 99 } 100 return -EBUSY; 101 } 102 103 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) 104 { 105 int id; 106 107 if (!rsrc->bmap) 108 return -EINVAL; 109 110 id = find_first_zero_bit(rsrc->bmap, rsrc->max); 111 if (id >= rsrc->max) 112 return -ENOSPC; 113 114 __set_bit(id, rsrc->bmap); 115 116 return id; 117 } 118 119 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) 120 { 121 int start; 122 123 if (!rsrc->bmap) 124 return -EINVAL; 125 126 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 127 if (start >= 
rsrc->max) 128 return -ENOSPC; 129 130 bitmap_set(rsrc->bmap, start, nrsrc); 131 return start; 132 } 133 134 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) 135 { 136 if (!rsrc->bmap) 137 return; 138 if (start >= rsrc->max) 139 return; 140 141 bitmap_clear(rsrc->bmap, start, nrsrc); 142 } 143 144 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) 145 { 146 int start; 147 148 if (!rsrc->bmap) 149 return false; 150 151 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 152 if (start >= rsrc->max) 153 return false; 154 155 return true; 156 } 157 158 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) 159 { 160 if (!rsrc->bmap) 161 return; 162 163 __clear_bit(id, rsrc->bmap); 164 } 165 166 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) 167 { 168 int used; 169 170 if (!rsrc->bmap) 171 return 0; 172 173 used = bitmap_weight(rsrc->bmap, rsrc->max); 174 return (rsrc->max - used); 175 } 176 177 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) 178 { 179 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), 180 sizeof(long), GFP_KERNEL); 181 if (!rsrc->bmap) 182 return -ENOMEM; 183 return 0; 184 } 185 186 /* Get block LF's HW index from a PF_FUNC's block slot number */ 187 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) 188 { 189 u16 match = 0; 190 int lf; 191 192 mutex_lock(&rvu->rsrc_lock); 193 for (lf = 0; lf < block->lf.max; lf++) { 194 if (block->fn_map[lf] == pcifunc) { 195 if (slot == match) { 196 mutex_unlock(&rvu->rsrc_lock); 197 return lf; 198 } 199 match++; 200 } 201 } 202 mutex_unlock(&rvu->rsrc_lock); 203 return -ENODEV; 204 } 205 206 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. 207 * Some silicon variants of OcteonTX2 supports 208 * multiple blocks of same type. 209 * 210 * @pcifunc has to be zero when no LF is yet attached. 211 */ 212 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) 213 { 214 int devnum, blkaddr = -ENODEV; 215 u64 cfg, reg; 216 bool is_pf; 217 218 switch (blktype) { 219 case BLKTYPE_NPC: 220 blkaddr = BLKADDR_NPC; 221 goto exit; 222 case BLKTYPE_NPA: 223 blkaddr = BLKADDR_NPA; 224 goto exit; 225 case BLKTYPE_NIX: 226 /* For now assume NIX0 */ 227 if (!pcifunc) { 228 blkaddr = BLKADDR_NIX0; 229 goto exit; 230 } 231 break; 232 case BLKTYPE_SSO: 233 blkaddr = BLKADDR_SSO; 234 goto exit; 235 case BLKTYPE_SSOW: 236 blkaddr = BLKADDR_SSOW; 237 goto exit; 238 case BLKTYPE_TIM: 239 blkaddr = BLKADDR_TIM; 240 goto exit; 241 case BLKTYPE_CPT: 242 /* For now assume CPT0 */ 243 if (!pcifunc) { 244 blkaddr = BLKADDR_CPT0; 245 goto exit; 246 } 247 break; 248 } 249 250 /* Check if this is a RVU PF or VF */ 251 if (pcifunc & RVU_PFVF_FUNC_MASK) { 252 is_pf = false; 253 devnum = rvu_get_hwvf(rvu, pcifunc); 254 } else { 255 is_pf = true; 256 devnum = rvu_get_pf(pcifunc); 257 } 258 259 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ 260 if (blktype == BLKTYPE_NIX) { 261 reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; 262 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 263 if (cfg) 264 blkaddr = BLKADDR_NIX0; 265 } 266 267 /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ 268 if (blktype == BLKTYPE_CPT) { 269 reg = is_pf ? 
RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; 270 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 271 if (cfg) 272 blkaddr = BLKADDR_CPT0; 273 } 274 275 exit: 276 if (is_block_implemented(rvu->hw, blkaddr)) 277 return blkaddr; 278 return -ENODEV; 279 } 280 281 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, 282 struct rvu_block *block, u16 pcifunc, 283 u16 lf, bool attach) 284 { 285 int devnum, num_lfs = 0; 286 bool is_pf; 287 u64 reg; 288 289 if (lf >= block->lf.max) { 290 dev_err(&rvu->pdev->dev, 291 "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", 292 __func__, lf, block->name, block->lf.max); 293 return; 294 } 295 296 /* Check if this is for a RVU PF or VF */ 297 if (pcifunc & RVU_PFVF_FUNC_MASK) { 298 is_pf = false; 299 devnum = rvu_get_hwvf(rvu, pcifunc); 300 } else { 301 is_pf = true; 302 devnum = rvu_get_pf(pcifunc); 303 } 304 305 block->fn_map[lf] = attach ? pcifunc : 0; 306 307 switch (block->type) { 308 case BLKTYPE_NPA: 309 pfvf->npalf = attach ? true : false; 310 num_lfs = pfvf->npalf; 311 break; 312 case BLKTYPE_NIX: 313 pfvf->nixlf = attach ? true : false; 314 num_lfs = pfvf->nixlf; 315 break; 316 case BLKTYPE_SSO: 317 attach ? pfvf->sso++ : pfvf->sso--; 318 num_lfs = pfvf->sso; 319 break; 320 case BLKTYPE_SSOW: 321 attach ? pfvf->ssow++ : pfvf->ssow--; 322 num_lfs = pfvf->ssow; 323 break; 324 case BLKTYPE_TIM: 325 attach ? pfvf->timlfs++ : pfvf->timlfs--; 326 num_lfs = pfvf->timlfs; 327 break; 328 case BLKTYPE_CPT: 329 attach ? pfvf->cptlfs++ : pfvf->cptlfs--; 330 num_lfs = pfvf->cptlfs; 331 break; 332 } 333 334 reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg; 335 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); 336 } 337 338 inline int rvu_get_pf(u16 pcifunc) 339 { 340 return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 341 } 342 343 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) 344 { 345 u64 cfg; 346 347 /* Get numVFs attached to this PF and first HWVF */ 348 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 349 *numvfs = (cfg >> 12) & 0xFF; 350 *hwvf = cfg & 0xFFF; 351 } 352 353 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) 354 { 355 int pf, func; 356 u64 cfg; 357 358 pf = rvu_get_pf(pcifunc); 359 func = pcifunc & RVU_PFVF_FUNC_MASK; 360 361 /* Get first HWVF attached to this PF */ 362 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 363 364 return ((cfg & 0xFFF) + func - 1); 365 } 366 367 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) 368 { 369 /* Check if it is a PF or VF */ 370 if (pcifunc & RVU_PFVF_FUNC_MASK) 371 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; 372 else 373 return &rvu->pf[rvu_get_pf(pcifunc)]; 374 } 375 376 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) 377 { 378 int pf, vf, nvfs; 379 u64 cfg; 380 381 pf = rvu_get_pf(pcifunc); 382 if (pf >= rvu->hw->total_pfs) 383 return false; 384 385 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 386 return true; 387 388 /* Check if VF is within number of VFs attached to this PF */ 389 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 390 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 391 nvfs = (cfg >> 12) & 0xFF; 392 if (vf >= nvfs) 393 return false; 394 395 return true; 396 } 397 398 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) 399 { 400 struct rvu_block *block; 401 402 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) 403 return false; 404 405 block = &hw->block[blkaddr]; 406 return block->implemented; 407 } 408 409 static void 
rvu_check_block_implemented(struct rvu *rvu) 410 { 411 struct rvu_hwinfo *hw = rvu->hw; 412 struct rvu_block *block; 413 int blkid; 414 u64 cfg; 415 416 /* For each block check if 'implemented' bit is set */ 417 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 418 block = &hw->block[blkid]; 419 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); 420 if (cfg & BIT_ULL(11)) 421 block->implemented = true; 422 } 423 } 424 425 static void rvu_setup_rvum_blk_revid(struct rvu *rvu) 426 { 427 rvu_write64(rvu, BLKADDR_RVUM, 428 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 429 RVU_BLK_RVUM_REVID); 430 } 431 432 static void rvu_clear_rvum_blk_revid(struct rvu *rvu) 433 { 434 rvu_write64(rvu, BLKADDR_RVUM, 435 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00); 436 } 437 438 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) 439 { 440 int err; 441 442 if (!block->implemented) 443 return 0; 444 445 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); 446 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), 447 true); 448 return err; 449 } 450 451 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) 452 { 453 struct rvu_block *block = &rvu->hw->block[blkaddr]; 454 455 if (!block->implemented) 456 return; 457 458 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); 459 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); 460 } 461 462 static void rvu_reset_all_blocks(struct rvu *rvu) 463 { 464 /* Do a HW reset of all RVU blocks */ 465 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); 466 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); 467 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); 468 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); 469 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); 470 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); 471 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); 472 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); 473 rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); 474 } 475 476 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) 477 { 478 struct rvu_pfvf *pfvf; 479 u64 cfg; 480 int lf; 481 482 for (lf = 0; lf < block->lf.max; lf++) { 483 cfg = rvu_read64(rvu, block->addr, 484 block->lfcfg_reg | (lf << block->lfshift)); 485 if (!(cfg & BIT_ULL(63))) 486 continue; 487 488 /* Set this resource as being used */ 489 __set_bit(lf, block->lf.bmap); 490 491 /* Get, to whom this LF is attached */ 492 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); 493 rvu_update_rsrc_map(rvu, pfvf, block, 494 (cfg >> 8) & 0xFFFF, lf, true); 495 496 /* Set start MSIX vector for this LF within this PF/VF */ 497 rvu_set_msix_offset(rvu, pfvf, block, lf); 498 } 499 } 500 501 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) 502 { 503 int min_vecs; 504 505 if (!vf) 506 goto check_pf; 507 508 if (!nvecs) { 509 dev_warn(rvu->dev, 510 "PF%d:VF%d is configured with zero msix vectors, %d\n", 511 pf, vf - 1, nvecs); 512 } 513 return; 514 515 check_pf: 516 if (pf == 0) 517 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; 518 else 519 min_vecs = RVU_PF_INT_VEC_CNT; 520 521 if (!(nvecs < min_vecs)) 522 return; 523 dev_warn(rvu->dev, 524 "PF%d is configured with too few vectors, %d, min is %d\n", 525 pf, nvecs, min_vecs); 526 } 527 528 static int rvu_setup_msix_resources(struct rvu *rvu) 529 { 530 struct rvu_hwinfo *hw = rvu->hw; 531 int pf, vf, numvfs, hwvf, err; 532 int nvecs, offset, max_msix; 533 struct rvu_pfvf *pfvf; 534 u64 cfg, phy_addr; 535 dma_addr_t iova; 
536 537 for (pf = 0; pf < hw->total_pfs; pf++) { 538 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 539 /* If PF is not enabled, nothing to do */ 540 if (!((cfg >> 20) & 0x01)) 541 continue; 542 543 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 544 545 pfvf = &rvu->pf[pf]; 546 /* Get num of MSIX vectors attached to this PF */ 547 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); 548 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; 549 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); 550 551 /* Alloc msix bitmap for this PF */ 552 err = rvu_alloc_bitmap(&pfvf->msix); 553 if (err) 554 return err; 555 556 /* Allocate memory for MSIX vector to RVU block LF mapping */ 557 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, 558 sizeof(u16), GFP_KERNEL); 559 if (!pfvf->msix_lfmap) 560 return -ENOMEM; 561 562 /* For PF0 (AF) firmware will set msix vector offsets for 563 * AF, block AF and PF0_INT vectors, so jump to VFs. 564 */ 565 if (!pf) 566 goto setup_vfmsix; 567 568 /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. 569 * These are allocated on driver init and never freed, 570 * so no need to set 'msix_lfmap' for these. 571 */ 572 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); 573 nvecs = (cfg >> 12) & 0xFF; 574 cfg &= ~0x7FFULL; 575 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 576 rvu_write64(rvu, BLKADDR_RVUM, 577 RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); 578 setup_vfmsix: 579 /* Alloc msix bitmap for VFs */ 580 for (vf = 0; vf < numvfs; vf++) { 581 pfvf = &rvu->hwvf[hwvf + vf]; 582 /* Get num of MSIX vectors attached to this VF */ 583 cfg = rvu_read64(rvu, BLKADDR_RVUM, 584 RVU_PRIV_PFX_MSIX_CFG(pf)); 585 pfvf->msix.max = (cfg & 0xFFF) + 1; 586 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); 587 588 /* Alloc msix bitmap for this VF */ 589 err = rvu_alloc_bitmap(&pfvf->msix); 590 if (err) 591 return err; 592 593 pfvf->msix_lfmap = 594 devm_kcalloc(rvu->dev, pfvf->msix.max, 595 sizeof(u16), GFP_KERNEL); 596 if (!pfvf->msix_lfmap) 597 return -ENOMEM; 598 599 /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. 600 * These are allocated on driver init and never freed, 601 * so no need to set 'msix_lfmap' for these. 602 */ 603 cfg = rvu_read64(rvu, BLKADDR_RVUM, 604 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); 605 nvecs = (cfg >> 12) & 0xFF; 606 cfg &= ~0x7FFULL; 607 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 608 rvu_write64(rvu, BLKADDR_RVUM, 609 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), 610 cfg | offset); 611 } 612 } 613 614 /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence 615 * create a IOMMU mapping for the physcial address configured by 616 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. 
617 */ 618 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 619 max_msix = cfg & 0xFFFFF; 620 if (rvu->fwdata && rvu->fwdata->msixtr_base) 621 phy_addr = rvu->fwdata->msixtr_base; 622 else 623 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); 624 625 iova = dma_map_resource(rvu->dev, phy_addr, 626 max_msix * PCI_MSIX_ENTRY_SIZE, 627 DMA_BIDIRECTIONAL, 0); 628 629 if (dma_mapping_error(rvu->dev, iova)) 630 return -ENOMEM; 631 632 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); 633 rvu->msix_base_iova = iova; 634 rvu->msixtr_base_phy = phy_addr; 635 636 return 0; 637 } 638 639 static void rvu_reset_msix(struct rvu *rvu) 640 { 641 /* Restore msixtr base register */ 642 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, 643 rvu->msixtr_base_phy); 644 } 645 646 static void rvu_free_hw_resources(struct rvu *rvu) 647 { 648 struct rvu_hwinfo *hw = rvu->hw; 649 struct rvu_block *block; 650 struct rvu_pfvf *pfvf; 651 int id, max_msix; 652 u64 cfg; 653 654 rvu_npa_freemem(rvu); 655 rvu_npc_freemem(rvu); 656 rvu_nix_freemem(rvu); 657 658 /* Free block LF bitmaps */ 659 for (id = 0; id < BLK_COUNT; id++) { 660 block = &hw->block[id]; 661 kfree(block->lf.bmap); 662 } 663 664 /* Free MSIX bitmaps */ 665 for (id = 0; id < hw->total_pfs; id++) { 666 pfvf = &rvu->pf[id]; 667 kfree(pfvf->msix.bmap); 668 } 669 670 for (id = 0; id < hw->total_vfs; id++) { 671 pfvf = &rvu->hwvf[id]; 672 kfree(pfvf->msix.bmap); 673 } 674 675 /* Unmap MSIX vector base IOVA mapping */ 676 if (!rvu->msix_base_iova) 677 return; 678 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 679 max_msix = cfg & 0xFFFFF; 680 dma_unmap_resource(rvu->dev, rvu->msix_base_iova, 681 max_msix * PCI_MSIX_ENTRY_SIZE, 682 DMA_BIDIRECTIONAL, 0); 683 684 rvu_reset_msix(rvu); 685 mutex_destroy(&rvu->rsrc_lock); 686 } 687 688 static void rvu_setup_pfvf_macaddress(struct rvu *rvu) 689 { 690 struct rvu_hwinfo *hw = rvu->hw; 691 int pf, vf, numvfs, hwvf; 692 struct rvu_pfvf *pfvf; 693 u64 *mac; 694 695 for (pf = 0; pf < hw->total_pfs; pf++) { 696 if (!is_pf_cgxmapped(rvu, pf)) 697 continue; 698 /* Assign MAC address to PF */ 699 pfvf = &rvu->pf[pf]; 700 if (rvu->fwdata && pf < PF_MACNUM_MAX) { 701 mac = &rvu->fwdata->pf_macs[pf]; 702 if (*mac) 703 u64_to_ether_addr(*mac, pfvf->mac_addr); 704 else 705 eth_random_addr(pfvf->mac_addr); 706 } else { 707 eth_random_addr(pfvf->mac_addr); 708 } 709 710 /* Assign MAC address to VFs */ 711 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 712 for (vf = 0; vf < numvfs; vf++, hwvf++) { 713 pfvf = &rvu->hwvf[hwvf]; 714 if (rvu->fwdata && hwvf < VF_MACNUM_MAX) { 715 mac = &rvu->fwdata->vf_macs[hwvf]; 716 if (*mac) 717 u64_to_ether_addr(*mac, pfvf->mac_addr); 718 else 719 eth_random_addr(pfvf->mac_addr); 720 } else { 721 eth_random_addr(pfvf->mac_addr); 722 } 723 } 724 } 725 } 726 727 static int rvu_fwdata_init(struct rvu *rvu) 728 { 729 u64 fwdbase; 730 int err; 731 732 /* Get firmware data base address */ 733 err = cgx_get_fwdata_base(&fwdbase); 734 if (err) 735 goto fail; 736 rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata)); 737 if (!rvu->fwdata) 738 goto fail; 739 if (!is_rvu_fwdata_valid(rvu)) { 740 dev_err(rvu->dev, 741 "Mismatch in 'fwdata' struct btw kernel and firmware\n"); 742 iounmap(rvu->fwdata); 743 rvu->fwdata = NULL; 744 return -EINVAL; 745 } 746 return 0; 747 fail: 748 dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n"); 749 return -EIO; 750 } 751 752 static void rvu_fwdata_exit(struct rvu *rvu) 753 { 754 if (rvu->fwdata) 755 iounmap(rvu->fwdata); 756 } 757 
758 static int rvu_setup_hw_resources(struct rvu *rvu) 759 { 760 struct rvu_hwinfo *hw = rvu->hw; 761 struct rvu_block *block; 762 int blkid, err; 763 u64 cfg; 764 765 /* Get HW supported max RVU PF & VF count */ 766 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 767 hw->total_pfs = (cfg >> 32) & 0xFF; 768 hw->total_vfs = (cfg >> 20) & 0xFFF; 769 hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; 770 771 /* Init NPA LF's bitmap */ 772 block = &hw->block[BLKADDR_NPA]; 773 if (!block->implemented) 774 goto nix; 775 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); 776 block->lf.max = (cfg >> 16) & 0xFFF; 777 block->addr = BLKADDR_NPA; 778 block->type = BLKTYPE_NPA; 779 block->lfshift = 8; 780 block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; 781 block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; 782 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; 783 block->lfcfg_reg = NPA_PRIV_LFX_CFG; 784 block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; 785 block->lfreset_reg = NPA_AF_LF_RST; 786 sprintf(block->name, "NPA"); 787 err = rvu_alloc_bitmap(&block->lf); 788 if (err) 789 return err; 790 791 nix: 792 /* Init NIX LF's bitmap */ 793 block = &hw->block[BLKADDR_NIX0]; 794 if (!block->implemented) 795 goto sso; 796 cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); 797 block->lf.max = cfg & 0xFFF; 798 block->addr = BLKADDR_NIX0; 799 block->type = BLKTYPE_NIX; 800 block->lfshift = 8; 801 block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; 802 block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; 803 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; 804 block->lfcfg_reg = NIX_PRIV_LFX_CFG; 805 block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; 806 block->lfreset_reg = NIX_AF_LF_RST; 807 sprintf(block->name, "NIX"); 808 err = rvu_alloc_bitmap(&block->lf); 809 if (err) 810 return err; 811 812 sso: 813 /* Init SSO group's bitmap */ 814 block = &hw->block[BLKADDR_SSO]; 815 if (!block->implemented) 816 goto ssow; 817 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); 818 block->lf.max = cfg & 0xFFFF; 819 block->addr = BLKADDR_SSO; 820 block->type = BLKTYPE_SSO; 821 block->multislot = true; 822 block->lfshift = 3; 823 block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; 824 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; 825 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; 826 block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; 827 block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; 828 block->lfreset_reg = SSO_AF_LF_HWGRP_RST; 829 sprintf(block->name, "SSO GROUP"); 830 err = rvu_alloc_bitmap(&block->lf); 831 if (err) 832 return err; 833 834 ssow: 835 /* Init SSO workslot's bitmap */ 836 block = &hw->block[BLKADDR_SSOW]; 837 if (!block->implemented) 838 goto tim; 839 block->lf.max = (cfg >> 56) & 0xFF; 840 block->addr = BLKADDR_SSOW; 841 block->type = BLKTYPE_SSOW; 842 block->multislot = true; 843 block->lfshift = 3; 844 block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; 845 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; 846 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; 847 block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; 848 block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; 849 block->lfreset_reg = SSOW_AF_LF_HWS_RST; 850 sprintf(block->name, "SSOWS"); 851 err = rvu_alloc_bitmap(&block->lf); 852 if (err) 853 return err; 854 855 tim: 856 /* Init TIM LF's bitmap */ 857 block = &hw->block[BLKADDR_TIM]; 858 if (!block->implemented) 859 goto cpt; 860 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); 861 block->lf.max = cfg & 0xFFFF; 862 block->addr = BLKADDR_TIM; 863 block->type = BLKTYPE_TIM; 864 block->multislot = true; 865 block->lfshift = 3; 866 block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; 867 
block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; 868 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; 869 block->lfcfg_reg = TIM_PRIV_LFX_CFG; 870 block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; 871 block->lfreset_reg = TIM_AF_LF_RST; 872 sprintf(block->name, "TIM"); 873 err = rvu_alloc_bitmap(&block->lf); 874 if (err) 875 return err; 876 877 cpt: 878 /* Init CPT LF's bitmap */ 879 block = &hw->block[BLKADDR_CPT0]; 880 if (!block->implemented) 881 goto init; 882 cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); 883 block->lf.max = cfg & 0xFF; 884 block->addr = BLKADDR_CPT0; 885 block->type = BLKTYPE_CPT; 886 block->multislot = true; 887 block->lfshift = 3; 888 block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; 889 block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; 890 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; 891 block->lfcfg_reg = CPT_PRIV_LFX_CFG; 892 block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; 893 block->lfreset_reg = CPT_AF_LF_RST; 894 sprintf(block->name, "CPT"); 895 err = rvu_alloc_bitmap(&block->lf); 896 if (err) 897 return err; 898 899 init: 900 /* Allocate memory for PFVF data */ 901 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, 902 sizeof(struct rvu_pfvf), GFP_KERNEL); 903 if (!rvu->pf) 904 return -ENOMEM; 905 906 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, 907 sizeof(struct rvu_pfvf), GFP_KERNEL); 908 if (!rvu->hwvf) 909 return -ENOMEM; 910 911 mutex_init(&rvu->rsrc_lock); 912 913 rvu_fwdata_init(rvu); 914 915 err = rvu_setup_msix_resources(rvu); 916 if (err) 917 return err; 918 919 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 920 block = &hw->block[blkid]; 921 if (!block->lf.bmap) 922 continue; 923 924 /* Allocate memory for block LF/slot to pcifunc mapping info */ 925 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, 926 sizeof(u16), GFP_KERNEL); 927 if (!block->fn_map) { 928 err = -ENOMEM; 929 goto msix_err; 930 } 931 932 /* Scan all blocks to check if low level firmware has 933 * already provisioned any of the resources to a PF/VF. 
934 */ 935 rvu_scan_block(rvu, block); 936 } 937 938 err = rvu_npc_init(rvu); 939 if (err) 940 goto npc_err; 941 942 err = rvu_cgx_init(rvu); 943 if (err) 944 goto cgx_err; 945 946 /* Assign MACs for CGX mapped functions */ 947 rvu_setup_pfvf_macaddress(rvu); 948 949 err = rvu_npa_init(rvu); 950 if (err) 951 goto npa_err; 952 953 err = rvu_nix_init(rvu); 954 if (err) 955 goto nix_err; 956 957 return 0; 958 959 nix_err: 960 rvu_nix_freemem(rvu); 961 npa_err: 962 rvu_npa_freemem(rvu); 963 cgx_err: 964 rvu_cgx_exit(rvu); 965 npc_err: 966 rvu_npc_freemem(rvu); 967 rvu_fwdata_exit(rvu); 968 msix_err: 969 rvu_reset_msix(rvu); 970 return err; 971 } 972 973 /* NPA and NIX admin queue APIs */ 974 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) 975 { 976 if (!aq) 977 return; 978 979 qmem_free(rvu->dev, aq->inst); 980 qmem_free(rvu->dev, aq->res); 981 devm_kfree(rvu->dev, aq); 982 } 983 984 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, 985 int qsize, int inst_size, int res_size) 986 { 987 struct admin_queue *aq; 988 int err; 989 990 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); 991 if (!*ad_queue) 992 return -ENOMEM; 993 aq = *ad_queue; 994 995 /* Alloc memory for instructions i.e AQ */ 996 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); 997 if (err) { 998 devm_kfree(rvu->dev, aq); 999 return err; 1000 } 1001 1002 /* Alloc memory for results */ 1003 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); 1004 if (err) { 1005 rvu_aq_free(rvu, aq); 1006 return err; 1007 } 1008 1009 spin_lock_init(&aq->lock); 1010 return 0; 1011 } 1012 1013 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, 1014 struct ready_msg_rsp *rsp) 1015 { 1016 if (rvu->fwdata) { 1017 rsp->rclk_freq = rvu->fwdata->rclk; 1018 rsp->sclk_freq = rvu->fwdata->sclk; 1019 } 1020 return 0; 1021 } 1022 1023 /* Get current count of a RVU block's LF/slots 1024 * provisioned to a given RVU func. 1025 */ 1026 static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) 1027 { 1028 switch (blktype) { 1029 case BLKTYPE_NPA: 1030 return pfvf->npalf ? 1 : 0; 1031 case BLKTYPE_NIX: 1032 return pfvf->nixlf ? 
1 : 0; 1033 case BLKTYPE_SSO: 1034 return pfvf->sso; 1035 case BLKTYPE_SSOW: 1036 return pfvf->ssow; 1037 case BLKTYPE_TIM: 1038 return pfvf->timlfs; 1039 case BLKTYPE_CPT: 1040 return pfvf->cptlfs; 1041 } 1042 return 0; 1043 } 1044 1045 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) 1046 { 1047 struct rvu_pfvf *pfvf; 1048 1049 if (!is_pf_func_valid(rvu, pcifunc)) 1050 return false; 1051 1052 pfvf = rvu_get_pfvf(rvu, pcifunc); 1053 1054 /* Check if this PFFUNC has a LF of type blktype attached */ 1055 if (!rvu_get_rsrc_mapcount(pfvf, blktype)) 1056 return false; 1057 1058 return true; 1059 } 1060 1061 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, 1062 int pcifunc, int slot) 1063 { 1064 u64 val; 1065 1066 val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); 1067 rvu_write64(rvu, block->addr, block->lookup_reg, val); 1068 /* Wait for the lookup to finish */ 1069 /* TODO: put some timeout here */ 1070 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) 1071 ; 1072 1073 val = rvu_read64(rvu, block->addr, block->lookup_reg); 1074 1075 /* Check LF valid bit */ 1076 if (!(val & (1ULL << 12))) 1077 return -1; 1078 1079 return (val & 0xFFF); 1080 } 1081 1082 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) 1083 { 1084 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1085 struct rvu_hwinfo *hw = rvu->hw; 1086 struct rvu_block *block; 1087 int slot, lf, num_lfs; 1088 int blkaddr; 1089 1090 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); 1091 if (blkaddr < 0) 1092 return; 1093 1094 block = &hw->block[blkaddr]; 1095 1096 num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1097 if (!num_lfs) 1098 return; 1099 1100 for (slot = 0; slot < num_lfs; slot++) { 1101 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); 1102 if (lf < 0) /* This should never happen */ 1103 continue; 1104 1105 /* Disable the LF */ 1106 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 1107 (lf << block->lfshift), 0x00ULL); 1108 1109 /* Update SW maintained mapping info as well */ 1110 rvu_update_rsrc_map(rvu, pfvf, block, 1111 pcifunc, lf, false); 1112 1113 /* Free the resource */ 1114 rvu_free_rsrc(&block->lf, lf); 1115 1116 /* Clear MSIX vector offset for this LF */ 1117 rvu_clear_msix_offset(rvu, pfvf, block, lf); 1118 } 1119 } 1120 1121 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, 1122 u16 pcifunc) 1123 { 1124 struct rvu_hwinfo *hw = rvu->hw; 1125 bool detach_all = true; 1126 struct rvu_block *block; 1127 int blkid; 1128 1129 mutex_lock(&rvu->rsrc_lock); 1130 1131 /* Check for partial resource detach */ 1132 if (detach && detach->partial) 1133 detach_all = false; 1134 1135 /* Check for RVU block's LFs attached to this func, 1136 * if so, detach them. 
1137 */ 1138 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 1139 block = &hw->block[blkid]; 1140 if (!block->lf.bmap) 1141 continue; 1142 if (!detach_all && detach) { 1143 if (blkid == BLKADDR_NPA && !detach->npalf) 1144 continue; 1145 else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) 1146 continue; 1147 else if ((blkid == BLKADDR_SSO) && !detach->sso) 1148 continue; 1149 else if ((blkid == BLKADDR_SSOW) && !detach->ssow) 1150 continue; 1151 else if ((blkid == BLKADDR_TIM) && !detach->timlfs) 1152 continue; 1153 else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) 1154 continue; 1155 } 1156 rvu_detach_block(rvu, pcifunc, block->type); 1157 } 1158 1159 mutex_unlock(&rvu->rsrc_lock); 1160 return 0; 1161 } 1162 1163 int rvu_mbox_handler_detach_resources(struct rvu *rvu, 1164 struct rsrc_detach *detach, 1165 struct msg_rsp *rsp) 1166 { 1167 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); 1168 } 1169 1170 static void rvu_attach_block(struct rvu *rvu, int pcifunc, 1171 int blktype, int num_lfs) 1172 { 1173 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1174 struct rvu_hwinfo *hw = rvu->hw; 1175 struct rvu_block *block; 1176 int slot, lf; 1177 int blkaddr; 1178 u64 cfg; 1179 1180 if (!num_lfs) 1181 return; 1182 1183 blkaddr = rvu_get_blkaddr(rvu, blktype, 0); 1184 if (blkaddr < 0) 1185 return; 1186 1187 block = &hw->block[blkaddr]; 1188 if (!block->lf.bmap) 1189 return; 1190 1191 for (slot = 0; slot < num_lfs; slot++) { 1192 /* Allocate the resource */ 1193 lf = rvu_alloc_rsrc(&block->lf); 1194 if (lf < 0) 1195 return; 1196 1197 cfg = (1ULL << 63) | (pcifunc << 8) | slot; 1198 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 1199 (lf << block->lfshift), cfg); 1200 rvu_update_rsrc_map(rvu, pfvf, block, 1201 pcifunc, lf, true); 1202 1203 /* Set start MSIX vector for this LF within this PF/VF */ 1204 rvu_set_msix_offset(rvu, pfvf, block, lf); 1205 } 1206 } 1207 1208 static int rvu_check_rsrc_availability(struct rvu *rvu, 1209 struct rsrc_attach *req, u16 pcifunc) 1210 { 1211 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1212 struct rvu_hwinfo *hw = rvu->hw; 1213 struct rvu_block *block; 1214 int free_lfs, mappedlfs; 1215 1216 /* Only one NPA LF can be attached */ 1217 if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { 1218 block = &hw->block[BLKADDR_NPA]; 1219 free_lfs = rvu_rsrc_free_count(&block->lf); 1220 if (!free_lfs) 1221 goto fail; 1222 } else if (req->npalf) { 1223 dev_err(&rvu->pdev->dev, 1224 "Func 0x%x: Invalid req, already has NPA\n", 1225 pcifunc); 1226 return -EINVAL; 1227 } 1228 1229 /* Only one NIX LF can be attached */ 1230 if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { 1231 block = &hw->block[BLKADDR_NIX0]; 1232 free_lfs = rvu_rsrc_free_count(&block->lf); 1233 if (!free_lfs) 1234 goto fail; 1235 } else if (req->nixlf) { 1236 dev_err(&rvu->pdev->dev, 1237 "Func 0x%x: Invalid req, already has NIX\n", 1238 pcifunc); 1239 return -EINVAL; 1240 } 1241 1242 if (req->sso) { 1243 block = &hw->block[BLKADDR_SSO]; 1244 /* Is request within limits ? 
*/ 1245 if (req->sso > block->lf.max) { 1246 dev_err(&rvu->pdev->dev, 1247 "Func 0x%x: Invalid SSO req, %d > max %d\n", 1248 pcifunc, req->sso, block->lf.max); 1249 return -EINVAL; 1250 } 1251 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1252 free_lfs = rvu_rsrc_free_count(&block->lf); 1253 /* Check if additional resources are available */ 1254 if (req->sso > mappedlfs && 1255 ((req->sso - mappedlfs) > free_lfs)) 1256 goto fail; 1257 } 1258 1259 if (req->ssow) { 1260 block = &hw->block[BLKADDR_SSOW]; 1261 if (req->ssow > block->lf.max) { 1262 dev_err(&rvu->pdev->dev, 1263 "Func 0x%x: Invalid SSOW req, %d > max %d\n", 1264 pcifunc, req->sso, block->lf.max); 1265 return -EINVAL; 1266 } 1267 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1268 free_lfs = rvu_rsrc_free_count(&block->lf); 1269 if (req->ssow > mappedlfs && 1270 ((req->ssow - mappedlfs) > free_lfs)) 1271 goto fail; 1272 } 1273 1274 if (req->timlfs) { 1275 block = &hw->block[BLKADDR_TIM]; 1276 if (req->timlfs > block->lf.max) { 1277 dev_err(&rvu->pdev->dev, 1278 "Func 0x%x: Invalid TIMLF req, %d > max %d\n", 1279 pcifunc, req->timlfs, block->lf.max); 1280 return -EINVAL; 1281 } 1282 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1283 free_lfs = rvu_rsrc_free_count(&block->lf); 1284 if (req->timlfs > mappedlfs && 1285 ((req->timlfs - mappedlfs) > free_lfs)) 1286 goto fail; 1287 } 1288 1289 if (req->cptlfs) { 1290 block = &hw->block[BLKADDR_CPT0]; 1291 if (req->cptlfs > block->lf.max) { 1292 dev_err(&rvu->pdev->dev, 1293 "Func 0x%x: Invalid CPTLF req, %d > max %d\n", 1294 pcifunc, req->cptlfs, block->lf.max); 1295 return -EINVAL; 1296 } 1297 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1298 free_lfs = rvu_rsrc_free_count(&block->lf); 1299 if (req->cptlfs > mappedlfs && 1300 ((req->cptlfs - mappedlfs) > free_lfs)) 1301 goto fail; 1302 } 1303 1304 return 0; 1305 1306 fail: 1307 dev_info(rvu->dev, "Request for %s failed\n", block->name); 1308 return -ENOSPC; 1309 } 1310 1311 int rvu_mbox_handler_attach_resources(struct rvu *rvu, 1312 struct rsrc_attach *attach, 1313 struct msg_rsp *rsp) 1314 { 1315 u16 pcifunc = attach->hdr.pcifunc; 1316 int err; 1317 1318 /* If first request, detach all existing attached resources */ 1319 if (!attach->modify) 1320 rvu_detach_rsrcs(rvu, NULL, pcifunc); 1321 1322 mutex_lock(&rvu->rsrc_lock); 1323 1324 /* Check if the request can be accommodated */ 1325 err = rvu_check_rsrc_availability(rvu, attach, pcifunc); 1326 if (err) 1327 goto exit; 1328 1329 /* Now attach the requested resources */ 1330 if (attach->npalf) 1331 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); 1332 1333 if (attach->nixlf) 1334 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); 1335 1336 if (attach->sso) { 1337 /* RVU func doesn't know which exact LF or slot is attached 1338 * to it, it always sees as slot 0,1,2. So for a 'modify' 1339 * request, simply detach all existing attached LFs/slots 1340 * and attach a fresh. 
1341 */ 1342 if (attach->modify) 1343 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); 1344 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); 1345 } 1346 1347 if (attach->ssow) { 1348 if (attach->modify) 1349 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); 1350 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); 1351 } 1352 1353 if (attach->timlfs) { 1354 if (attach->modify) 1355 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); 1356 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); 1357 } 1358 1359 if (attach->cptlfs) { 1360 if (attach->modify) 1361 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); 1362 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); 1363 } 1364 1365 exit: 1366 mutex_unlock(&rvu->rsrc_lock); 1367 return err; 1368 } 1369 1370 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1371 int blkaddr, int lf) 1372 { 1373 u16 vec; 1374 1375 if (lf < 0) 1376 return MSIX_VECTOR_INVALID; 1377 1378 for (vec = 0; vec < pfvf->msix.max; vec++) { 1379 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) 1380 return vec; 1381 } 1382 return MSIX_VECTOR_INVALID; 1383 } 1384 1385 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1386 struct rvu_block *block, int lf) 1387 { 1388 u16 nvecs, vec, offset; 1389 u64 cfg; 1390 1391 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1392 (lf << block->lfshift)); 1393 nvecs = (cfg >> 12) & 0xFF; 1394 1395 /* Check and alloc MSIX vectors, must be contiguous */ 1396 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) 1397 return; 1398 1399 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 1400 1401 /* Config MSIX offset in LF */ 1402 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1403 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); 1404 1405 /* Update the bitmap as well */ 1406 for (vec = 0; vec < nvecs; vec++) 1407 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); 1408 } 1409 1410 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1411 struct rvu_block *block, int lf) 1412 { 1413 u16 nvecs, vec, offset; 1414 u64 cfg; 1415 1416 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1417 (lf << block->lfshift)); 1418 nvecs = (cfg >> 12) & 0xFF; 1419 1420 /* Clear MSIX offset in LF */ 1421 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1422 (lf << block->lfshift), cfg & ~0x7FFULL); 1423 1424 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); 1425 1426 /* Update the mapping */ 1427 for (vec = 0; vec < nvecs; vec++) 1428 pfvf->msix_lfmap[offset + vec] = 0; 1429 1430 /* Free the same in MSIX bitmap */ 1431 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); 1432 } 1433 1434 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, 1435 struct msix_offset_rsp *rsp) 1436 { 1437 struct rvu_hwinfo *hw = rvu->hw; 1438 u16 pcifunc = req->hdr.pcifunc; 1439 struct rvu_pfvf *pfvf; 1440 int lf, slot; 1441 1442 pfvf = rvu_get_pfvf(rvu, pcifunc); 1443 if (!pfvf->msix.bmap) 1444 return 0; 1445 1446 /* Set MSIX offsets for each block's LFs attached to this PF/VF */ 1447 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); 1448 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); 1449 1450 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); 1451 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); 1452 1453 rsp->sso = pfvf->sso; 1454 for (slot = 0; slot < rsp->sso; slot++) { 1455 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); 1456 rsp->sso_msixoff[slot] = 1457 
rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); 1458 } 1459 1460 rsp->ssow = pfvf->ssow; 1461 for (slot = 0; slot < rsp->ssow; slot++) { 1462 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); 1463 rsp->ssow_msixoff[slot] = 1464 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); 1465 } 1466 1467 rsp->timlfs = pfvf->timlfs; 1468 for (slot = 0; slot < rsp->timlfs; slot++) { 1469 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot); 1470 rsp->timlf_msixoff[slot] = 1471 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); 1472 } 1473 1474 rsp->cptlfs = pfvf->cptlfs; 1475 for (slot = 0; slot < rsp->cptlfs; slot++) { 1476 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); 1477 rsp->cptlf_msixoff[slot] = 1478 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); 1479 } 1480 return 0; 1481 } 1482 1483 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, 1484 struct msg_rsp *rsp) 1485 { 1486 u16 pcifunc = req->hdr.pcifunc; 1487 u16 vf, numvfs; 1488 u64 cfg; 1489 1490 vf = pcifunc & RVU_PFVF_FUNC_MASK; 1491 cfg = rvu_read64(rvu, BLKADDR_RVUM, 1492 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); 1493 numvfs = (cfg >> 12) & 0xFF; 1494 1495 if (vf && vf <= numvfs) 1496 __rvu_flr_handler(rvu, pcifunc); 1497 else 1498 return RVU_INVALID_VF_ID; 1499 1500 return 0; 1501 } 1502 1503 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, 1504 struct get_hw_cap_rsp *rsp) 1505 { 1506 struct rvu_hwinfo *hw = rvu->hw; 1507 1508 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; 1509 rsp->nix_shaping = hw->cap.nix_shaping; 1510 1511 return 0; 1512 } 1513 1514 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, 1515 struct mbox_msghdr *req) 1516 { 1517 struct rvu *rvu = pci_get_drvdata(mbox->pdev); 1518 1519 /* Check if valid, if not reply with a invalid msg */ 1520 if (req->sig != OTX2_MBOX_REQ_SIG) 1521 goto bad_message; 1522 1523 switch (req->id) { 1524 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 1525 case _id: { \ 1526 struct _rsp_type *rsp; \ 1527 int err; \ 1528 \ 1529 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ 1530 mbox, devid, \ 1531 sizeof(struct _rsp_type)); \ 1532 /* some handlers should complete even if reply */ \ 1533 /* could not be allocated */ \ 1534 if (!rsp && \ 1535 _id != MBOX_MSG_DETACH_RESOURCES && \ 1536 _id != MBOX_MSG_NIX_TXSCH_FREE && \ 1537 _id != MBOX_MSG_VF_FLR) \ 1538 return -ENOMEM; \ 1539 if (rsp) { \ 1540 rsp->hdr.id = _id; \ 1541 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ 1542 rsp->hdr.pcifunc = req->pcifunc; \ 1543 rsp->hdr.rc = 0; \ 1544 } \ 1545 \ 1546 err = rvu_mbox_handler_ ## _fn_name(rvu, \ 1547 (struct _req_type *)req, \ 1548 rsp); \ 1549 if (rsp && err) \ 1550 rsp->hdr.rc = err; \ 1551 \ 1552 return rsp ? 
err : -ENOMEM; \ 1553 } 1554 MBOX_MESSAGES 1555 #undef M 1556 1557 bad_message: 1558 default: 1559 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); 1560 return -ENODEV; 1561 } 1562 } 1563 1564 static void __rvu_mbox_handler(struct rvu_work *mwork, int type) 1565 { 1566 struct rvu *rvu = mwork->rvu; 1567 int offset, err, id, devid; 1568 struct otx2_mbox_dev *mdev; 1569 struct mbox_hdr *req_hdr; 1570 struct mbox_msghdr *msg; 1571 struct mbox_wq_info *mw; 1572 struct otx2_mbox *mbox; 1573 1574 switch (type) { 1575 case TYPE_AFPF: 1576 mw = &rvu->afpf_wq_info; 1577 break; 1578 case TYPE_AFVF: 1579 mw = &rvu->afvf_wq_info; 1580 break; 1581 default: 1582 return; 1583 } 1584 1585 devid = mwork - mw->mbox_wrk; 1586 mbox = &mw->mbox; 1587 mdev = &mbox->dev[devid]; 1588 1589 /* Process received mbox messages */ 1590 req_hdr = mdev->mbase + mbox->rx_start; 1591 if (mw->mbox_wrk[devid].num_msgs == 0) 1592 return; 1593 1594 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); 1595 1596 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { 1597 msg = mdev->mbase + offset; 1598 1599 /* Set which PF/VF sent this message based on mbox IRQ */ 1600 switch (type) { 1601 case TYPE_AFPF: 1602 msg->pcifunc &= 1603 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); 1604 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); 1605 break; 1606 case TYPE_AFVF: 1607 msg->pcifunc &= 1608 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); 1609 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; 1610 break; 1611 } 1612 1613 err = rvu_process_mbox_msg(mbox, devid, msg); 1614 if (!err) { 1615 offset = mbox->rx_start + msg->next_msgoff; 1616 continue; 1617 } 1618 1619 if (msg->pcifunc & RVU_PFVF_FUNC_MASK) 1620 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", 1621 err, otx2_mbox_id2name(msg->id), 1622 msg->id, rvu_get_pf(msg->pcifunc), 1623 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); 1624 else 1625 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", 1626 err, otx2_mbox_id2name(msg->id), 1627 msg->id, devid); 1628 } 1629 mw->mbox_wrk[devid].num_msgs = 0; 1630 1631 /* Send mbox responses to VF/PF */ 1632 otx2_mbox_msg_send(mbox, devid); 1633 } 1634 1635 static inline void rvu_afpf_mbox_handler(struct work_struct *work) 1636 { 1637 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1638 1639 __rvu_mbox_handler(mwork, TYPE_AFPF); 1640 } 1641 1642 static inline void rvu_afvf_mbox_handler(struct work_struct *work) 1643 { 1644 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1645 1646 __rvu_mbox_handler(mwork, TYPE_AFVF); 1647 } 1648 1649 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) 1650 { 1651 struct rvu *rvu = mwork->rvu; 1652 struct otx2_mbox_dev *mdev; 1653 struct mbox_hdr *rsp_hdr; 1654 struct mbox_msghdr *msg; 1655 struct mbox_wq_info *mw; 1656 struct otx2_mbox *mbox; 1657 int offset, id, devid; 1658 1659 switch (type) { 1660 case TYPE_AFPF: 1661 mw = &rvu->afpf_wq_info; 1662 break; 1663 case TYPE_AFVF: 1664 mw = &rvu->afvf_wq_info; 1665 break; 1666 default: 1667 return; 1668 } 1669 1670 devid = mwork - mw->mbox_wrk_up; 1671 mbox = &mw->mbox_up; 1672 mdev = &mbox->dev[devid]; 1673 1674 rsp_hdr = mdev->mbase + mbox->rx_start; 1675 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { 1676 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); 1677 return; 1678 } 1679 1680 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 1681 1682 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { 1683 
msg = mdev->mbase + offset; 1684 1685 if (msg->id >= MBOX_MSG_MAX) { 1686 dev_err(rvu->dev, 1687 "Mbox msg with unknown ID 0x%x\n", msg->id); 1688 goto end; 1689 } 1690 1691 if (msg->sig != OTX2_MBOX_RSP_SIG) { 1692 dev_err(rvu->dev, 1693 "Mbox msg with wrong signature %x, ID 0x%x\n", 1694 msg->sig, msg->id); 1695 goto end; 1696 } 1697 1698 switch (msg->id) { 1699 case MBOX_MSG_CGX_LINK_EVENT: 1700 break; 1701 default: 1702 if (msg->rc) 1703 dev_err(rvu->dev, 1704 "Mbox msg response has err %d, ID 0x%x\n", 1705 msg->rc, msg->id); 1706 break; 1707 } 1708 end: 1709 offset = mbox->rx_start + msg->next_msgoff; 1710 mdev->msgs_acked++; 1711 } 1712 mw->mbox_wrk_up[devid].up_num_msgs = 0; 1713 1714 otx2_mbox_reset(mbox, devid); 1715 } 1716 1717 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) 1718 { 1719 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1720 1721 __rvu_mbox_up_handler(mwork, TYPE_AFPF); 1722 } 1723 1724 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) 1725 { 1726 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1727 1728 __rvu_mbox_up_handler(mwork, TYPE_AFVF); 1729 } 1730 1731 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 1732 int type, int num, 1733 void (mbox_handler)(struct work_struct *), 1734 void (mbox_up_handler)(struct work_struct *)) 1735 { 1736 void __iomem *hwbase = NULL, *reg_base; 1737 int err, i, dir, dir_up; 1738 struct rvu_work *mwork; 1739 const char *name; 1740 u64 bar4_addr; 1741 1742 switch (type) { 1743 case TYPE_AFPF: 1744 name = "rvu_afpf_mailbox"; 1745 bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); 1746 dir = MBOX_DIR_AFPF; 1747 dir_up = MBOX_DIR_AFPF_UP; 1748 reg_base = rvu->afreg_base; 1749 break; 1750 case TYPE_AFVF: 1751 name = "rvu_afvf_mailbox"; 1752 bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); 1753 dir = MBOX_DIR_PFVF; 1754 dir_up = MBOX_DIR_PFVF_UP; 1755 reg_base = rvu->pfreg_base; 1756 break; 1757 default: 1758 return -EINVAL; 1759 } 1760 1761 mw->mbox_wq = alloc_workqueue(name, 1762 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1763 num); 1764 if (!mw->mbox_wq) 1765 return -ENOMEM; 1766 1767 mw->mbox_wrk = devm_kcalloc(rvu->dev, num, 1768 sizeof(struct rvu_work), GFP_KERNEL); 1769 if (!mw->mbox_wrk) { 1770 err = -ENOMEM; 1771 goto exit; 1772 } 1773 1774 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, 1775 sizeof(struct rvu_work), GFP_KERNEL); 1776 if (!mw->mbox_wrk_up) { 1777 err = -ENOMEM; 1778 goto exit; 1779 } 1780 1781 /* Mailbox is a reserved memory (in RAM) region shared between 1782 * RVU devices, shouldn't be mapped as device memory to allow 1783 * unaligned accesses. 
1784 */ 1785 hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); 1786 if (!hwbase) { 1787 dev_err(rvu->dev, "Unable to map mailbox region\n"); 1788 err = -ENOMEM; 1789 goto exit; 1790 } 1791 1792 err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); 1793 if (err) 1794 goto exit; 1795 1796 err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, 1797 reg_base, dir_up, num); 1798 if (err) 1799 goto exit; 1800 1801 for (i = 0; i < num; i++) { 1802 mwork = &mw->mbox_wrk[i]; 1803 mwork->rvu = rvu; 1804 INIT_WORK(&mwork->work, mbox_handler); 1805 1806 mwork = &mw->mbox_wrk_up[i]; 1807 mwork->rvu = rvu; 1808 INIT_WORK(&mwork->work, mbox_up_handler); 1809 } 1810 1811 return 0; 1812 exit: 1813 if (hwbase) 1814 iounmap((void __iomem *)hwbase); 1815 destroy_workqueue(mw->mbox_wq); 1816 return err; 1817 } 1818 1819 static void rvu_mbox_destroy(struct mbox_wq_info *mw) 1820 { 1821 if (mw->mbox_wq) { 1822 flush_workqueue(mw->mbox_wq); 1823 destroy_workqueue(mw->mbox_wq); 1824 mw->mbox_wq = NULL; 1825 } 1826 1827 if (mw->mbox.hwbase) 1828 iounmap((void __iomem *)mw->mbox.hwbase); 1829 1830 otx2_mbox_destroy(&mw->mbox); 1831 otx2_mbox_destroy(&mw->mbox_up); 1832 } 1833 1834 static void rvu_queue_work(struct mbox_wq_info *mw, int first, 1835 int mdevs, u64 intr) 1836 { 1837 struct otx2_mbox_dev *mdev; 1838 struct otx2_mbox *mbox; 1839 struct mbox_hdr *hdr; 1840 int i; 1841 1842 for (i = first; i < mdevs; i++) { 1843 /* start from 0 */ 1844 if (!(intr & BIT_ULL(i - first))) 1845 continue; 1846 1847 mbox = &mw->mbox; 1848 mdev = &mbox->dev[i]; 1849 hdr = mdev->mbase + mbox->rx_start; 1850 1851 /*The hdr->num_msgs is set to zero immediately in the interrupt 1852 * handler to ensure that it holds a correct value next time 1853 * when the interrupt handler is called. 1854 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler 1855 * pf>mbox.up_num_msgs holds the data for use in 1856 * pfaf_mbox_up_handler. 
1857 */ 1858 1859 if (hdr->num_msgs) { 1860 mw->mbox_wrk[i].num_msgs = hdr->num_msgs; 1861 hdr->num_msgs = 0; 1862 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); 1863 } 1864 mbox = &mw->mbox_up; 1865 mdev = &mbox->dev[i]; 1866 hdr = mdev->mbase + mbox->rx_start; 1867 if (hdr->num_msgs) { 1868 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; 1869 hdr->num_msgs = 0; 1870 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); 1871 } 1872 } 1873 } 1874 1875 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) 1876 { 1877 struct rvu *rvu = (struct rvu *)rvu_irq; 1878 int vfs = rvu->vfs; 1879 u64 intr; 1880 1881 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); 1882 /* Clear interrupts */ 1883 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); 1884 1885 /* Sync with mbox memory region */ 1886 rmb(); 1887 1888 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); 1889 1890 /* Handle VF interrupts */ 1891 if (vfs > 64) { 1892 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); 1893 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); 1894 1895 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); 1896 vfs -= 64; 1897 } 1898 1899 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); 1900 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); 1901 1902 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); 1903 1904 return IRQ_HANDLED; 1905 } 1906 1907 static void rvu_enable_mbox_intr(struct rvu *rvu) 1908 { 1909 struct rvu_hwinfo *hw = rvu->hw; 1910 1911 /* Clear spurious irqs, if any */ 1912 rvu_write64(rvu, BLKADDR_RVUM, 1913 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); 1914 1915 /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ 1916 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, 1917 INTR_MASK(hw->total_pfs) & ~1ULL); 1918 } 1919 1920 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) 1921 { 1922 struct rvu_block *block; 1923 int slot, lf, num_lfs; 1924 int err; 1925 1926 block = &rvu->hw->block[blkaddr]; 1927 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), 1928 block->type); 1929 if (!num_lfs) 1930 return; 1931 for (slot = 0; slot < num_lfs; slot++) { 1932 lf = rvu_get_lf(rvu, block, pcifunc, slot); 1933 if (lf < 0) 1934 continue; 1935 1936 /* Cleanup LF and reset it */ 1937 if (block->addr == BLKADDR_NIX0) 1938 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); 1939 else if (block->addr == BLKADDR_NPA) 1940 rvu_npa_lf_teardown(rvu, pcifunc, lf); 1941 1942 err = rvu_lf_reset(rvu, block, lf); 1943 if (err) { 1944 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", 1945 block->addr, lf); 1946 } 1947 } 1948 } 1949 1950 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) 1951 { 1952 mutex_lock(&rvu->flr_lock); 1953 /* Reset order should reflect inter-block dependencies: 1954 * 1. Reset any packet/work sources (NIX, CPT, TIM) 1955 * 2. Flush and reset SSO/SSOW 1956 * 3. 
Cleanup pools (NPA) 1957 */ 1958 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); 1959 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); 1960 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); 1961 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); 1962 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); 1963 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); 1964 rvu_detach_rsrcs(rvu, NULL, pcifunc); 1965 mutex_unlock(&rvu->flr_lock); 1966 } 1967 1968 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) 1969 { 1970 int reg = 0; 1971 1972 /* pcifunc = 0(PF0) | (vf + 1) */ 1973 __rvu_flr_handler(rvu, vf + 1); 1974 1975 if (vf >= 64) { 1976 reg = 1; 1977 vf = vf - 64; 1978 } 1979 1980 /* Signal FLR finish and enable IRQ */ 1981 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); 1982 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); 1983 } 1984 1985 static void rvu_flr_handler(struct work_struct *work) 1986 { 1987 struct rvu_work *flrwork = container_of(work, struct rvu_work, work); 1988 struct rvu *rvu = flrwork->rvu; 1989 u16 pcifunc, numvfs, vf; 1990 u64 cfg; 1991 int pf; 1992 1993 pf = flrwork - rvu->flr_wrk; 1994 if (pf >= rvu->hw->total_pfs) { 1995 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); 1996 return; 1997 } 1998 1999 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2000 numvfs = (cfg >> 12) & 0xFF; 2001 pcifunc = pf << RVU_PFVF_PF_SHIFT; 2002 2003 for (vf = 0; vf < numvfs; vf++) 2004 __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); 2005 2006 __rvu_flr_handler(rvu, pcifunc); 2007 2008 /* Signal FLR finish */ 2009 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); 2010 2011 /* Enable interrupt */ 2012 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); 2013 } 2014 2015 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) 2016 { 2017 int dev, vf, reg = 0; 2018 u64 intr; 2019 2020 if (start_vf >= 64) 2021 reg = 1; 2022 2023 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); 2024 if (!intr) 2025 return; 2026 2027 for (vf = 0; vf < numvfs; vf++) { 2028 if (!(intr & BIT_ULL(vf))) 2029 continue; 2030 dev = vf + start_vf + rvu->hw->total_pfs; 2031 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); 2032 /* Clear and disable the interrupt */ 2033 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); 2034 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); 2035 } 2036 } 2037 2038 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) 2039 { 2040 struct rvu *rvu = (struct rvu *)rvu_irq; 2041 u64 intr; 2042 u8 pf; 2043 2044 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); 2045 if (!intr) 2046 goto afvf_flr; 2047 2048 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2049 if (intr & (1ULL << pf)) { 2050 /* PF is already dead do only AF related operations */ 2051 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); 2052 /* clear interrupt */ 2053 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, 2054 BIT_ULL(pf)); 2055 /* Disable the interrupt */ 2056 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, 2057 BIT_ULL(pf)); 2058 } 2059 } 2060 2061 afvf_flr: 2062 rvu_afvf_queue_flr_work(rvu, 0, 64); 2063 if (rvu->vfs > 64) 2064 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); 2065 2066 return IRQ_HANDLED; 2067 } 2068 2069 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) 2070 { 2071 int vf; 2072 2073 /* Nothing to be done here other than clearing the 2074 * TRPEND bit. 
static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq])
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}
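
/* The AF (PF0) needs its own AF interrupt vectors plus the usual PF
 * vectors to service mbox/FLR/ME events of the VFs attached to it.
 * Returns true only if that many MSI-X vectors are configured and the
 * PF vector offset is non-zero (i.e. not overlapping the AF vectors).
 */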
static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlapping AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PF */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
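
	/* The vectors below are taken from PF0's MSI-X range (starting at
	 * pf_vec_start) and service mailbox, FLR and ME events for the VFs
	 * attached to the AF itself; each event type gets two vectors,
	 * apparently one per group of up to 64 VFs.
	 */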

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;
	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		flush_workqueue(rvu->flr_wq);
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}
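
/* Set the FLR enable bit in each PF's privilege config (BIT_ULL(22) is
 * presumably the per-PF FLR enable) and create the FLR workqueue with one
 * work item per PF and per AF VF; rvu_flr_handler() then runs the actual
 * teardown in process context.
 */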
static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}

static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

#define PCI_DEVID_OCTEONTX2_LBK 0xA061

static int lbk_get_num_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
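
/* AF's VFs are backed by LBK channels, so SRIOV is enabled only when
 * enough MSI-X vectors are available, and the VF count is capped by the
 * channel count read from LBK(0)_CONST.
 */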
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = lbk_get_num_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
}

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_irq;

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	return 0;
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
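
/* Device teardown mirrors rvu_probe() in reverse order. */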
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);