1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/interrupt.h> 13 #include <linux/delay.h> 14 #include <linux/irq.h> 15 #include <linux/pci.h> 16 #include <linux/sysfs.h> 17 18 #include "cgx.h" 19 #include "rvu.h" 20 #include "rvu_reg.h" 21 22 #define DRV_NAME "octeontx2-af" 23 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" 24 #define DRV_VERSION "1.0" 25 26 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); 27 28 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 29 struct rvu_block *block, int lf); 30 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 31 struct rvu_block *block, int lf); 32 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); 33 34 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 35 int type, int num, 36 void (mbox_handler)(struct work_struct *), 37 void (mbox_up_handler)(struct work_struct *)); 38 enum { 39 TYPE_AFVF, 40 TYPE_AFPF, 41 }; 42 43 /* Supported devices */ 44 static const struct pci_device_id rvu_id_table[] = { 45 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, 46 { 0, } /* end of table */ 47 }; 48 49 MODULE_AUTHOR("Marvell International Ltd."); 50 MODULE_DESCRIPTION(DRV_STRING); 51 MODULE_LICENSE("GPL v2"); 52 MODULE_VERSION(DRV_VERSION); 53 MODULE_DEVICE_TABLE(pci, rvu_id_table); 54 55 static char *mkex_profile; /* MKEX profile name */ 56 module_param(mkex_profile, charp, 0000); 57 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); 58 59 static void rvu_setup_hw_capabilities(struct rvu *rvu) 60 { 61 struct rvu_hwinfo *hw = rvu->hw; 62 63 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; 64 hw->cap.nix_fixed_txschq_mapping = false; 65 hw->cap.nix_shaping = true; 66 hw->cap.nix_tx_link_bp = true; 67 hw->cap.nix_rx_multicast = true; 68 69 if (is_rvu_96xx_B0(rvu)) { 70 hw->cap.nix_fixed_txschq_mapping = true; 71 hw->cap.nix_txsch_per_cgx_lmac = 4; 72 hw->cap.nix_txsch_per_lbk_lmac = 132; 73 hw->cap.nix_txsch_per_sdp_lmac = 76; 74 hw->cap.nix_shaping = false; 75 hw->cap.nix_tx_link_bp = false; 76 if (is_rvu_96xx_A0(rvu)) 77 hw->cap.nix_rx_multicast = false; 78 } 79 } 80 81 /* Poll a RVU block's register 'offset', for a 'zero' 82 * or 'nonzero' at bits specified by 'mask' 83 */ 84 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) 85 { 86 unsigned long timeout = jiffies + usecs_to_jiffies(10000); 87 void __iomem *reg; 88 u64 reg_val; 89 90 reg = rvu->afreg_base + ((block << 28) | offset); 91 while (time_before(jiffies, timeout)) { 92 reg_val = readq(reg); 93 if (zero && !(reg_val & mask)) 94 return 0; 95 if (!zero && (reg_val & mask)) 96 return 0; 97 usleep_range(1, 5); 98 } 99 return -EBUSY; 100 } 101 102 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) 103 { 104 int id; 105 106 if (!rsrc->bmap) 107 return -EINVAL; 108 109 id = find_first_zero_bit(rsrc->bmap, rsrc->max); 110 if (id >= rsrc->max) 111 return -ENOSPC; 112 113 __set_bit(id, rsrc->bmap); 114 115 return id; 116 } 117 118 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) 119 { 120 int start; 121 122 if (!rsrc->bmap) 123 return -EINVAL; 124 125 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 126 if 
(start >= rsrc->max) 127 return -ENOSPC; 128 129 bitmap_set(rsrc->bmap, start, nrsrc); 130 return start; 131 } 132 133 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) 134 { 135 if (!rsrc->bmap) 136 return; 137 if (start >= rsrc->max) 138 return; 139 140 bitmap_clear(rsrc->bmap, start, nrsrc); 141 } 142 143 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) 144 { 145 int start; 146 147 if (!rsrc->bmap) 148 return false; 149 150 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 151 if (start >= rsrc->max) 152 return false; 153 154 return true; 155 } 156 157 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) 158 { 159 if (!rsrc->bmap) 160 return; 161 162 __clear_bit(id, rsrc->bmap); 163 } 164 165 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) 166 { 167 int used; 168 169 if (!rsrc->bmap) 170 return 0; 171 172 used = bitmap_weight(rsrc->bmap, rsrc->max); 173 return (rsrc->max - used); 174 } 175 176 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) 177 { 178 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), 179 sizeof(long), GFP_KERNEL); 180 if (!rsrc->bmap) 181 return -ENOMEM; 182 return 0; 183 } 184 185 /* Get block LF's HW index from a PF_FUNC's block slot number */ 186 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) 187 { 188 u16 match = 0; 189 int lf; 190 191 mutex_lock(&rvu->rsrc_lock); 192 for (lf = 0; lf < block->lf.max; lf++) { 193 if (block->fn_map[lf] == pcifunc) { 194 if (slot == match) { 195 mutex_unlock(&rvu->rsrc_lock); 196 return lf; 197 } 198 match++; 199 } 200 } 201 mutex_unlock(&rvu->rsrc_lock); 202 return -ENODEV; 203 } 204 205 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. 206 * Some silicon variants of OcteonTX2 supports 207 * multiple blocks of same type. 208 * 209 * @pcifunc has to be zero when no LF is yet attached. 210 */ 211 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) 212 { 213 int devnum, blkaddr = -ENODEV; 214 u64 cfg, reg; 215 bool is_pf; 216 217 switch (blktype) { 218 case BLKTYPE_NPC: 219 blkaddr = BLKADDR_NPC; 220 goto exit; 221 case BLKTYPE_NPA: 222 blkaddr = BLKADDR_NPA; 223 goto exit; 224 case BLKTYPE_NIX: 225 /* For now assume NIX0 */ 226 if (!pcifunc) { 227 blkaddr = BLKADDR_NIX0; 228 goto exit; 229 } 230 break; 231 case BLKTYPE_SSO: 232 blkaddr = BLKADDR_SSO; 233 goto exit; 234 case BLKTYPE_SSOW: 235 blkaddr = BLKADDR_SSOW; 236 goto exit; 237 case BLKTYPE_TIM: 238 blkaddr = BLKADDR_TIM; 239 goto exit; 240 case BLKTYPE_CPT: 241 /* For now assume CPT0 */ 242 if (!pcifunc) { 243 blkaddr = BLKADDR_CPT0; 244 goto exit; 245 } 246 break; 247 } 248 249 /* Check if this is a RVU PF or VF */ 250 if (pcifunc & RVU_PFVF_FUNC_MASK) { 251 is_pf = false; 252 devnum = rvu_get_hwvf(rvu, pcifunc); 253 } else { 254 is_pf = true; 255 devnum = rvu_get_pf(pcifunc); 256 } 257 258 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ 259 if (blktype == BLKTYPE_NIX) { 260 reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; 261 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 262 if (cfg) 263 blkaddr = BLKADDR_NIX0; 264 } 265 266 /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ 267 if (blktype == BLKTYPE_CPT) { 268 reg = is_pf ? 
RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; 269 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 270 if (cfg) 271 blkaddr = BLKADDR_CPT0; 272 } 273 274 exit: 275 if (is_block_implemented(rvu->hw, blkaddr)) 276 return blkaddr; 277 return -ENODEV; 278 } 279 280 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, 281 struct rvu_block *block, u16 pcifunc, 282 u16 lf, bool attach) 283 { 284 int devnum, num_lfs = 0; 285 bool is_pf; 286 u64 reg; 287 288 if (lf >= block->lf.max) { 289 dev_err(&rvu->pdev->dev, 290 "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", 291 __func__, lf, block->name, block->lf.max); 292 return; 293 } 294 295 /* Check if this is for a RVU PF or VF */ 296 if (pcifunc & RVU_PFVF_FUNC_MASK) { 297 is_pf = false; 298 devnum = rvu_get_hwvf(rvu, pcifunc); 299 } else { 300 is_pf = true; 301 devnum = rvu_get_pf(pcifunc); 302 } 303 304 block->fn_map[lf] = attach ? pcifunc : 0; 305 306 switch (block->type) { 307 case BLKTYPE_NPA: 308 pfvf->npalf = attach ? true : false; 309 num_lfs = pfvf->npalf; 310 break; 311 case BLKTYPE_NIX: 312 pfvf->nixlf = attach ? true : false; 313 num_lfs = pfvf->nixlf; 314 break; 315 case BLKTYPE_SSO: 316 attach ? pfvf->sso++ : pfvf->sso--; 317 num_lfs = pfvf->sso; 318 break; 319 case BLKTYPE_SSOW: 320 attach ? pfvf->ssow++ : pfvf->ssow--; 321 num_lfs = pfvf->ssow; 322 break; 323 case BLKTYPE_TIM: 324 attach ? pfvf->timlfs++ : pfvf->timlfs--; 325 num_lfs = pfvf->timlfs; 326 break; 327 case BLKTYPE_CPT: 328 attach ? pfvf->cptlfs++ : pfvf->cptlfs--; 329 num_lfs = pfvf->cptlfs; 330 break; 331 } 332 333 reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg; 334 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); 335 } 336 337 inline int rvu_get_pf(u16 pcifunc) 338 { 339 return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 340 } 341 342 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) 343 { 344 u64 cfg; 345 346 /* Get numVFs attached to this PF and first HWVF */ 347 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 348 *numvfs = (cfg >> 12) & 0xFF; 349 *hwvf = cfg & 0xFFF; 350 } 351 352 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) 353 { 354 int pf, func; 355 u64 cfg; 356 357 pf = rvu_get_pf(pcifunc); 358 func = pcifunc & RVU_PFVF_FUNC_MASK; 359 360 /* Get first HWVF attached to this PF */ 361 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 362 363 return ((cfg & 0xFFF) + func - 1); 364 } 365 366 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) 367 { 368 /* Check if it is a PF or VF */ 369 if (pcifunc & RVU_PFVF_FUNC_MASK) 370 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; 371 else 372 return &rvu->pf[rvu_get_pf(pcifunc)]; 373 } 374 375 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) 376 { 377 int pf, vf, nvfs; 378 u64 cfg; 379 380 pf = rvu_get_pf(pcifunc); 381 if (pf >= rvu->hw->total_pfs) 382 return false; 383 384 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 385 return true; 386 387 /* Check if VF is within number of VFs attached to this PF */ 388 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 389 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 390 nvfs = (cfg >> 12) & 0xFF; 391 if (vf >= nvfs) 392 return false; 393 394 return true; 395 } 396 397 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) 398 { 399 struct rvu_block *block; 400 401 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) 402 return false; 403 404 block = &hw->block[blkaddr]; 405 return block->implemented; 406 } 407 408 static void 
rvu_check_block_implemented(struct rvu *rvu) 409 { 410 struct rvu_hwinfo *hw = rvu->hw; 411 struct rvu_block *block; 412 int blkid; 413 u64 cfg; 414 415 /* For each block check if 'implemented' bit is set */ 416 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 417 block = &hw->block[blkid]; 418 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); 419 if (cfg & BIT_ULL(11)) 420 block->implemented = true; 421 } 422 } 423 424 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) 425 { 426 int err; 427 428 if (!block->implemented) 429 return 0; 430 431 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); 432 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), 433 true); 434 return err; 435 } 436 437 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) 438 { 439 struct rvu_block *block = &rvu->hw->block[blkaddr]; 440 441 if (!block->implemented) 442 return; 443 444 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); 445 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); 446 } 447 448 static void rvu_reset_all_blocks(struct rvu *rvu) 449 { 450 /* Do a HW reset of all RVU blocks */ 451 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); 452 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); 453 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); 454 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); 455 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); 456 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); 457 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); 458 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); 459 rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); 460 } 461 462 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) 463 { 464 struct rvu_pfvf *pfvf; 465 u64 cfg; 466 int lf; 467 468 for (lf = 0; lf < block->lf.max; lf++) { 469 cfg = rvu_read64(rvu, block->addr, 470 block->lfcfg_reg | (lf << block->lfshift)); 471 if (!(cfg & BIT_ULL(63))) 472 continue; 473 474 /* Set this resource as being used */ 475 __set_bit(lf, block->lf.bmap); 476 477 /* Get, to whom this LF is attached */ 478 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); 479 rvu_update_rsrc_map(rvu, pfvf, block, 480 (cfg >> 8) & 0xFFFF, lf, true); 481 482 /* Set start MSIX vector for this LF within this PF/VF */ 483 rvu_set_msix_offset(rvu, pfvf, block, lf); 484 } 485 } 486 487 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) 488 { 489 int min_vecs; 490 491 if (!vf) 492 goto check_pf; 493 494 if (!nvecs) { 495 dev_warn(rvu->dev, 496 "PF%d:VF%d is configured with zero msix vectors, %d\n", 497 pf, vf - 1, nvecs); 498 } 499 return; 500 501 check_pf: 502 if (pf == 0) 503 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; 504 else 505 min_vecs = RVU_PF_INT_VEC_CNT; 506 507 if (!(nvecs < min_vecs)) 508 return; 509 dev_warn(rvu->dev, 510 "PF%d is configured with too few vectors, %d, min is %d\n", 511 pf, nvecs, min_vecs); 512 } 513 514 static int rvu_setup_msix_resources(struct rvu *rvu) 515 { 516 struct rvu_hwinfo *hw = rvu->hw; 517 int pf, vf, numvfs, hwvf, err; 518 int nvecs, offset, max_msix; 519 struct rvu_pfvf *pfvf; 520 u64 cfg, phy_addr; 521 dma_addr_t iova; 522 523 for (pf = 0; pf < hw->total_pfs; pf++) { 524 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 525 /* If PF is not enabled, nothing to do */ 526 if (!((cfg >> 20) & 0x01)) 527 continue; 528 529 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 530 531 pfvf = &rvu->pf[pf]; 532 /* Get num of MSIX vectors attached to this PF */ 
533 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); 534 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; 535 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); 536 537 /* Alloc msix bitmap for this PF */ 538 err = rvu_alloc_bitmap(&pfvf->msix); 539 if (err) 540 return err; 541 542 /* Allocate memory for MSIX vector to RVU block LF mapping */ 543 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, 544 sizeof(u16), GFP_KERNEL); 545 if (!pfvf->msix_lfmap) 546 return -ENOMEM; 547 548 /* For PF0 (AF) firmware will set msix vector offsets for 549 * AF, block AF and PF0_INT vectors, so jump to VFs. 550 */ 551 if (!pf) 552 goto setup_vfmsix; 553 554 /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. 555 * These are allocated on driver init and never freed, 556 * so no need to set 'msix_lfmap' for these. 557 */ 558 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); 559 nvecs = (cfg >> 12) & 0xFF; 560 cfg &= ~0x7FFULL; 561 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 562 rvu_write64(rvu, BLKADDR_RVUM, 563 RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); 564 setup_vfmsix: 565 /* Alloc msix bitmap for VFs */ 566 for (vf = 0; vf < numvfs; vf++) { 567 pfvf = &rvu->hwvf[hwvf + vf]; 568 /* Get num of MSIX vectors attached to this VF */ 569 cfg = rvu_read64(rvu, BLKADDR_RVUM, 570 RVU_PRIV_PFX_MSIX_CFG(pf)); 571 pfvf->msix.max = (cfg & 0xFFF) + 1; 572 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); 573 574 /* Alloc msix bitmap for this VF */ 575 err = rvu_alloc_bitmap(&pfvf->msix); 576 if (err) 577 return err; 578 579 pfvf->msix_lfmap = 580 devm_kcalloc(rvu->dev, pfvf->msix.max, 581 sizeof(u16), GFP_KERNEL); 582 if (!pfvf->msix_lfmap) 583 return -ENOMEM; 584 585 /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. 586 * These are allocated on driver init and never freed, 587 * so no need to set 'msix_lfmap' for these. 588 */ 589 cfg = rvu_read64(rvu, BLKADDR_RVUM, 590 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); 591 nvecs = (cfg >> 12) & 0xFF; 592 cfg &= ~0x7FFULL; 593 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 594 rvu_write64(rvu, BLKADDR_RVUM, 595 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), 596 cfg | offset); 597 } 598 } 599 600 /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence 601 * create a IOMMU mapping for the physcial address configured by 602 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. 
603 */ 604 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 605 max_msix = cfg & 0xFFFFF; 606 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); 607 iova = dma_map_resource(rvu->dev, phy_addr, 608 max_msix * PCI_MSIX_ENTRY_SIZE, 609 DMA_BIDIRECTIONAL, 0); 610 611 if (dma_mapping_error(rvu->dev, iova)) 612 return -ENOMEM; 613 614 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); 615 rvu->msix_base_iova = iova; 616 617 return 0; 618 } 619 620 static void rvu_free_hw_resources(struct rvu *rvu) 621 { 622 struct rvu_hwinfo *hw = rvu->hw; 623 struct rvu_block *block; 624 struct rvu_pfvf *pfvf; 625 int id, max_msix; 626 u64 cfg; 627 628 rvu_npa_freemem(rvu); 629 rvu_npc_freemem(rvu); 630 rvu_nix_freemem(rvu); 631 632 /* Free block LF bitmaps */ 633 for (id = 0; id < BLK_COUNT; id++) { 634 block = &hw->block[id]; 635 kfree(block->lf.bmap); 636 } 637 638 /* Free MSIX bitmaps */ 639 for (id = 0; id < hw->total_pfs; id++) { 640 pfvf = &rvu->pf[id]; 641 kfree(pfvf->msix.bmap); 642 } 643 644 for (id = 0; id < hw->total_vfs; id++) { 645 pfvf = &rvu->hwvf[id]; 646 kfree(pfvf->msix.bmap); 647 } 648 649 /* Unmap MSIX vector base IOVA mapping */ 650 if (!rvu->msix_base_iova) 651 return; 652 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 653 max_msix = cfg & 0xFFFFF; 654 dma_unmap_resource(rvu->dev, rvu->msix_base_iova, 655 max_msix * PCI_MSIX_ENTRY_SIZE, 656 DMA_BIDIRECTIONAL, 0); 657 658 mutex_destroy(&rvu->rsrc_lock); 659 } 660 661 static int rvu_setup_hw_resources(struct rvu *rvu) 662 { 663 struct rvu_hwinfo *hw = rvu->hw; 664 struct rvu_block *block; 665 int blkid, err; 666 u64 cfg; 667 668 /* Get HW supported max RVU PF & VF count */ 669 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 670 hw->total_pfs = (cfg >> 32) & 0xFF; 671 hw->total_vfs = (cfg >> 20) & 0xFFF; 672 hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; 673 674 /* Init NPA LF's bitmap */ 675 block = &hw->block[BLKADDR_NPA]; 676 if (!block->implemented) 677 goto nix; 678 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); 679 block->lf.max = (cfg >> 16) & 0xFFF; 680 block->addr = BLKADDR_NPA; 681 block->type = BLKTYPE_NPA; 682 block->lfshift = 8; 683 block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; 684 block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; 685 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; 686 block->lfcfg_reg = NPA_PRIV_LFX_CFG; 687 block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; 688 block->lfreset_reg = NPA_AF_LF_RST; 689 sprintf(block->name, "NPA"); 690 err = rvu_alloc_bitmap(&block->lf); 691 if (err) 692 return err; 693 694 nix: 695 /* Init NIX LF's bitmap */ 696 block = &hw->block[BLKADDR_NIX0]; 697 if (!block->implemented) 698 goto sso; 699 cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); 700 block->lf.max = cfg & 0xFFF; 701 block->addr = BLKADDR_NIX0; 702 block->type = BLKTYPE_NIX; 703 block->lfshift = 8; 704 block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; 705 block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; 706 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; 707 block->lfcfg_reg = NIX_PRIV_LFX_CFG; 708 block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; 709 block->lfreset_reg = NIX_AF_LF_RST; 710 sprintf(block->name, "NIX"); 711 err = rvu_alloc_bitmap(&block->lf); 712 if (err) 713 return err; 714 715 sso: 716 /* Init SSO group's bitmap */ 717 block = &hw->block[BLKADDR_SSO]; 718 if (!block->implemented) 719 goto ssow; 720 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); 721 block->lf.max = cfg & 0xFFFF; 722 block->addr = BLKADDR_SSO; 723 block->type = BLKTYPE_SSO; 724 block->multislot = true; 725 block->lfshift = 3; 726 
block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; 727 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; 728 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; 729 block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; 730 block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; 731 block->lfreset_reg = SSO_AF_LF_HWGRP_RST; 732 sprintf(block->name, "SSO GROUP"); 733 err = rvu_alloc_bitmap(&block->lf); 734 if (err) 735 return err; 736 737 ssow: 738 /* Init SSO workslot's bitmap */ 739 block = &hw->block[BLKADDR_SSOW]; 740 if (!block->implemented) 741 goto tim; 742 block->lf.max = (cfg >> 56) & 0xFF; 743 block->addr = BLKADDR_SSOW; 744 block->type = BLKTYPE_SSOW; 745 block->multislot = true; 746 block->lfshift = 3; 747 block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; 748 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; 749 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; 750 block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; 751 block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; 752 block->lfreset_reg = SSOW_AF_LF_HWS_RST; 753 sprintf(block->name, "SSOWS"); 754 err = rvu_alloc_bitmap(&block->lf); 755 if (err) 756 return err; 757 758 tim: 759 /* Init TIM LF's bitmap */ 760 block = &hw->block[BLKADDR_TIM]; 761 if (!block->implemented) 762 goto cpt; 763 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); 764 block->lf.max = cfg & 0xFFFF; 765 block->addr = BLKADDR_TIM; 766 block->type = BLKTYPE_TIM; 767 block->multislot = true; 768 block->lfshift = 3; 769 block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; 770 block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; 771 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; 772 block->lfcfg_reg = TIM_PRIV_LFX_CFG; 773 block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; 774 block->lfreset_reg = TIM_AF_LF_RST; 775 sprintf(block->name, "TIM"); 776 err = rvu_alloc_bitmap(&block->lf); 777 if (err) 778 return err; 779 780 cpt: 781 /* Init CPT LF's bitmap */ 782 block = &hw->block[BLKADDR_CPT0]; 783 if (!block->implemented) 784 goto init; 785 cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); 786 block->lf.max = cfg & 0xFF; 787 block->addr = BLKADDR_CPT0; 788 block->type = BLKTYPE_CPT; 789 block->multislot = true; 790 block->lfshift = 3; 791 block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; 792 block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; 793 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; 794 block->lfcfg_reg = CPT_PRIV_LFX_CFG; 795 block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; 796 block->lfreset_reg = CPT_AF_LF_RST; 797 sprintf(block->name, "CPT"); 798 err = rvu_alloc_bitmap(&block->lf); 799 if (err) 800 return err; 801 802 init: 803 /* Allocate memory for PFVF data */ 804 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, 805 sizeof(struct rvu_pfvf), GFP_KERNEL); 806 if (!rvu->pf) 807 return -ENOMEM; 808 809 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, 810 sizeof(struct rvu_pfvf), GFP_KERNEL); 811 if (!rvu->hwvf) 812 return -ENOMEM; 813 814 mutex_init(&rvu->rsrc_lock); 815 816 err = rvu_setup_msix_resources(rvu); 817 if (err) 818 return err; 819 820 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 821 block = &hw->block[blkid]; 822 if (!block->lf.bmap) 823 continue; 824 825 /* Allocate memory for block LF/slot to pcifunc mapping info */ 826 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, 827 sizeof(u16), GFP_KERNEL); 828 if (!block->fn_map) 829 return -ENOMEM; 830 831 /* Scan all blocks to check if low level firmware has 832 * already provisioned any of the resources to a PF/VF. 
833 */ 834 rvu_scan_block(rvu, block); 835 } 836 837 err = rvu_npc_init(rvu); 838 if (err) 839 goto exit; 840 841 err = rvu_cgx_init(rvu); 842 if (err) 843 goto exit; 844 845 err = rvu_npa_init(rvu); 846 if (err) 847 goto cgx_err; 848 849 err = rvu_nix_init(rvu); 850 if (err) 851 goto cgx_err; 852 853 return 0; 854 855 cgx_err: 856 rvu_cgx_exit(rvu); 857 exit: 858 return err; 859 } 860 861 /* NPA and NIX admin queue APIs */ 862 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) 863 { 864 if (!aq) 865 return; 866 867 qmem_free(rvu->dev, aq->inst); 868 qmem_free(rvu->dev, aq->res); 869 devm_kfree(rvu->dev, aq); 870 } 871 872 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, 873 int qsize, int inst_size, int res_size) 874 { 875 struct admin_queue *aq; 876 int err; 877 878 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); 879 if (!*ad_queue) 880 return -ENOMEM; 881 aq = *ad_queue; 882 883 /* Alloc memory for instructions i.e AQ */ 884 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); 885 if (err) { 886 devm_kfree(rvu->dev, aq); 887 return err; 888 } 889 890 /* Alloc memory for results */ 891 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); 892 if (err) { 893 rvu_aq_free(rvu, aq); 894 return err; 895 } 896 897 spin_lock_init(&aq->lock); 898 return 0; 899 } 900 901 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, 902 struct ready_msg_rsp *rsp) 903 { 904 return 0; 905 } 906 907 /* Get current count of a RVU block's LF/slots 908 * provisioned to a given RVU func. 909 */ 910 static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) 911 { 912 switch (blktype) { 913 case BLKTYPE_NPA: 914 return pfvf->npalf ? 1 : 0; 915 case BLKTYPE_NIX: 916 return pfvf->nixlf ? 1 : 0; 917 case BLKTYPE_SSO: 918 return pfvf->sso; 919 case BLKTYPE_SSOW: 920 return pfvf->ssow; 921 case BLKTYPE_TIM: 922 return pfvf->timlfs; 923 case BLKTYPE_CPT: 924 return pfvf->cptlfs; 925 } 926 return 0; 927 } 928 929 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) 930 { 931 struct rvu_pfvf *pfvf; 932 933 if (!is_pf_func_valid(rvu, pcifunc)) 934 return false; 935 936 pfvf = rvu_get_pfvf(rvu, pcifunc); 937 938 /* Check if this PFFUNC has a LF of type blktype attached */ 939 if (!rvu_get_rsrc_mapcount(pfvf, blktype)) 940 return false; 941 942 return true; 943 } 944 945 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, 946 int pcifunc, int slot) 947 { 948 u64 val; 949 950 val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); 951 rvu_write64(rvu, block->addr, block->lookup_reg, val); 952 /* Wait for the lookup to finish */ 953 /* TODO: put some timeout here */ 954 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) 955 ; 956 957 val = rvu_read64(rvu, block->addr, block->lookup_reg); 958 959 /* Check LF valid bit */ 960 if (!(val & (1ULL << 12))) 961 return -1; 962 963 return (val & 0xFFF); 964 } 965 966 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) 967 { 968 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 969 struct rvu_hwinfo *hw = rvu->hw; 970 struct rvu_block *block; 971 int slot, lf, num_lfs; 972 int blkaddr; 973 974 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); 975 if (blkaddr < 0) 976 return; 977 978 block = &hw->block[blkaddr]; 979 980 num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); 981 if (!num_lfs) 982 return; 983 984 for (slot = 0; slot < num_lfs; slot++) { 985 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); 986 if (lf < 0) /* This should never happen */ 987 
continue; 988 989 /* Disable the LF */ 990 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 991 (lf << block->lfshift), 0x00ULL); 992 993 /* Update SW maintained mapping info as well */ 994 rvu_update_rsrc_map(rvu, pfvf, block, 995 pcifunc, lf, false); 996 997 /* Free the resource */ 998 rvu_free_rsrc(&block->lf, lf); 999 1000 /* Clear MSIX vector offset for this LF */ 1001 rvu_clear_msix_offset(rvu, pfvf, block, lf); 1002 } 1003 } 1004 1005 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, 1006 u16 pcifunc) 1007 { 1008 struct rvu_hwinfo *hw = rvu->hw; 1009 bool detach_all = true; 1010 struct rvu_block *block; 1011 int blkid; 1012 1013 mutex_lock(&rvu->rsrc_lock); 1014 1015 /* Check for partial resource detach */ 1016 if (detach && detach->partial) 1017 detach_all = false; 1018 1019 /* Check for RVU block's LFs attached to this func, 1020 * if so, detach them. 1021 */ 1022 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 1023 block = &hw->block[blkid]; 1024 if (!block->lf.bmap) 1025 continue; 1026 if (!detach_all && detach) { 1027 if (blkid == BLKADDR_NPA && !detach->npalf) 1028 continue; 1029 else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) 1030 continue; 1031 else if ((blkid == BLKADDR_SSO) && !detach->sso) 1032 continue; 1033 else if ((blkid == BLKADDR_SSOW) && !detach->ssow) 1034 continue; 1035 else if ((blkid == BLKADDR_TIM) && !detach->timlfs) 1036 continue; 1037 else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) 1038 continue; 1039 } 1040 rvu_detach_block(rvu, pcifunc, block->type); 1041 } 1042 1043 mutex_unlock(&rvu->rsrc_lock); 1044 return 0; 1045 } 1046 1047 int rvu_mbox_handler_detach_resources(struct rvu *rvu, 1048 struct rsrc_detach *detach, 1049 struct msg_rsp *rsp) 1050 { 1051 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); 1052 } 1053 1054 static void rvu_attach_block(struct rvu *rvu, int pcifunc, 1055 int blktype, int num_lfs) 1056 { 1057 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1058 struct rvu_hwinfo *hw = rvu->hw; 1059 struct rvu_block *block; 1060 int slot, lf; 1061 int blkaddr; 1062 u64 cfg; 1063 1064 if (!num_lfs) 1065 return; 1066 1067 blkaddr = rvu_get_blkaddr(rvu, blktype, 0); 1068 if (blkaddr < 0) 1069 return; 1070 1071 block = &hw->block[blkaddr]; 1072 if (!block->lf.bmap) 1073 return; 1074 1075 for (slot = 0; slot < num_lfs; slot++) { 1076 /* Allocate the resource */ 1077 lf = rvu_alloc_rsrc(&block->lf); 1078 if (lf < 0) 1079 return; 1080 1081 cfg = (1ULL << 63) | (pcifunc << 8) | slot; 1082 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 1083 (lf << block->lfshift), cfg); 1084 rvu_update_rsrc_map(rvu, pfvf, block, 1085 pcifunc, lf, true); 1086 1087 /* Set start MSIX vector for this LF within this PF/VF */ 1088 rvu_set_msix_offset(rvu, pfvf, block, lf); 1089 } 1090 } 1091 1092 static int rvu_check_rsrc_availability(struct rvu *rvu, 1093 struct rsrc_attach *req, u16 pcifunc) 1094 { 1095 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1096 struct rvu_hwinfo *hw = rvu->hw; 1097 struct rvu_block *block; 1098 int free_lfs, mappedlfs; 1099 1100 /* Only one NPA LF can be attached */ 1101 if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { 1102 block = &hw->block[BLKADDR_NPA]; 1103 free_lfs = rvu_rsrc_free_count(&block->lf); 1104 if (!free_lfs) 1105 goto fail; 1106 } else if (req->npalf) { 1107 dev_err(&rvu->pdev->dev, 1108 "Func 0x%x: Invalid req, already has NPA\n", 1109 pcifunc); 1110 return -EINVAL; 1111 } 1112 1113 /* Only one NIX LF can be attached */ 1114 if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, 
BLKTYPE_NIX)) { 1115 block = &hw->block[BLKADDR_NIX0]; 1116 free_lfs = rvu_rsrc_free_count(&block->lf); 1117 if (!free_lfs) 1118 goto fail; 1119 } else if (req->nixlf) { 1120 dev_err(&rvu->pdev->dev, 1121 "Func 0x%x: Invalid req, already has NIX\n", 1122 pcifunc); 1123 return -EINVAL; 1124 } 1125 1126 if (req->sso) { 1127 block = &hw->block[BLKADDR_SSO]; 1128 /* Is request within limits ? */ 1129 if (req->sso > block->lf.max) { 1130 dev_err(&rvu->pdev->dev, 1131 "Func 0x%x: Invalid SSO req, %d > max %d\n", 1132 pcifunc, req->sso, block->lf.max); 1133 return -EINVAL; 1134 } 1135 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1136 free_lfs = rvu_rsrc_free_count(&block->lf); 1137 /* Check if additional resources are available */ 1138 if (req->sso > mappedlfs && 1139 ((req->sso - mappedlfs) > free_lfs)) 1140 goto fail; 1141 } 1142 1143 if (req->ssow) { 1144 block = &hw->block[BLKADDR_SSOW]; 1145 if (req->ssow > block->lf.max) { 1146 dev_err(&rvu->pdev->dev, 1147 "Func 0x%x: Invalid SSOW req, %d > max %d\n", 1148 pcifunc, req->sso, block->lf.max); 1149 return -EINVAL; 1150 } 1151 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1152 free_lfs = rvu_rsrc_free_count(&block->lf); 1153 if (req->ssow > mappedlfs && 1154 ((req->ssow - mappedlfs) > free_lfs)) 1155 goto fail; 1156 } 1157 1158 if (req->timlfs) { 1159 block = &hw->block[BLKADDR_TIM]; 1160 if (req->timlfs > block->lf.max) { 1161 dev_err(&rvu->pdev->dev, 1162 "Func 0x%x: Invalid TIMLF req, %d > max %d\n", 1163 pcifunc, req->timlfs, block->lf.max); 1164 return -EINVAL; 1165 } 1166 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1167 free_lfs = rvu_rsrc_free_count(&block->lf); 1168 if (req->timlfs > mappedlfs && 1169 ((req->timlfs - mappedlfs) > free_lfs)) 1170 goto fail; 1171 } 1172 1173 if (req->cptlfs) { 1174 block = &hw->block[BLKADDR_CPT0]; 1175 if (req->cptlfs > block->lf.max) { 1176 dev_err(&rvu->pdev->dev, 1177 "Func 0x%x: Invalid CPTLF req, %d > max %d\n", 1178 pcifunc, req->cptlfs, block->lf.max); 1179 return -EINVAL; 1180 } 1181 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); 1182 free_lfs = rvu_rsrc_free_count(&block->lf); 1183 if (req->cptlfs > mappedlfs && 1184 ((req->cptlfs - mappedlfs) > free_lfs)) 1185 goto fail; 1186 } 1187 1188 return 0; 1189 1190 fail: 1191 dev_info(rvu->dev, "Request for %s failed\n", block->name); 1192 return -ENOSPC; 1193 } 1194 1195 int rvu_mbox_handler_attach_resources(struct rvu *rvu, 1196 struct rsrc_attach *attach, 1197 struct msg_rsp *rsp) 1198 { 1199 u16 pcifunc = attach->hdr.pcifunc; 1200 int err; 1201 1202 /* If first request, detach all existing attached resources */ 1203 if (!attach->modify) 1204 rvu_detach_rsrcs(rvu, NULL, pcifunc); 1205 1206 mutex_lock(&rvu->rsrc_lock); 1207 1208 /* Check if the request can be accommodated */ 1209 err = rvu_check_rsrc_availability(rvu, attach, pcifunc); 1210 if (err) 1211 goto exit; 1212 1213 /* Now attach the requested resources */ 1214 if (attach->npalf) 1215 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); 1216 1217 if (attach->nixlf) 1218 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); 1219 1220 if (attach->sso) { 1221 /* RVU func doesn't know which exact LF or slot is attached 1222 * to it, it always sees as slot 0,1,2. So for a 'modify' 1223 * request, simply detach all existing attached LFs/slots 1224 * and attach a fresh. 
1225 */ 1226 if (attach->modify) 1227 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); 1228 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); 1229 } 1230 1231 if (attach->ssow) { 1232 if (attach->modify) 1233 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); 1234 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); 1235 } 1236 1237 if (attach->timlfs) { 1238 if (attach->modify) 1239 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); 1240 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); 1241 } 1242 1243 if (attach->cptlfs) { 1244 if (attach->modify) 1245 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); 1246 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); 1247 } 1248 1249 exit: 1250 mutex_unlock(&rvu->rsrc_lock); 1251 return err; 1252 } 1253 1254 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1255 int blkaddr, int lf) 1256 { 1257 u16 vec; 1258 1259 if (lf < 0) 1260 return MSIX_VECTOR_INVALID; 1261 1262 for (vec = 0; vec < pfvf->msix.max; vec++) { 1263 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) 1264 return vec; 1265 } 1266 return MSIX_VECTOR_INVALID; 1267 } 1268 1269 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1270 struct rvu_block *block, int lf) 1271 { 1272 u16 nvecs, vec, offset; 1273 u64 cfg; 1274 1275 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1276 (lf << block->lfshift)); 1277 nvecs = (cfg >> 12) & 0xFF; 1278 1279 /* Check and alloc MSIX vectors, must be contiguous */ 1280 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) 1281 return; 1282 1283 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 1284 1285 /* Config MSIX offset in LF */ 1286 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1287 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); 1288 1289 /* Update the bitmap as well */ 1290 for (vec = 0; vec < nvecs; vec++) 1291 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); 1292 } 1293 1294 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1295 struct rvu_block *block, int lf) 1296 { 1297 u16 nvecs, vec, offset; 1298 u64 cfg; 1299 1300 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1301 (lf << block->lfshift)); 1302 nvecs = (cfg >> 12) & 0xFF; 1303 1304 /* Clear MSIX offset in LF */ 1305 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1306 (lf << block->lfshift), cfg & ~0x7FFULL); 1307 1308 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); 1309 1310 /* Update the mapping */ 1311 for (vec = 0; vec < nvecs; vec++) 1312 pfvf->msix_lfmap[offset + vec] = 0; 1313 1314 /* Free the same in MSIX bitmap */ 1315 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); 1316 } 1317 1318 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, 1319 struct msix_offset_rsp *rsp) 1320 { 1321 struct rvu_hwinfo *hw = rvu->hw; 1322 u16 pcifunc = req->hdr.pcifunc; 1323 struct rvu_pfvf *pfvf; 1324 int lf, slot; 1325 1326 pfvf = rvu_get_pfvf(rvu, pcifunc); 1327 if (!pfvf->msix.bmap) 1328 return 0; 1329 1330 /* Set MSIX offsets for each block's LFs attached to this PF/VF */ 1331 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); 1332 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); 1333 1334 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); 1335 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); 1336 1337 rsp->sso = pfvf->sso; 1338 for (slot = 0; slot < rsp->sso; slot++) { 1339 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); 1340 rsp->sso_msixoff[slot] = 1341 
rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); 1342 } 1343 1344 rsp->ssow = pfvf->ssow; 1345 for (slot = 0; slot < rsp->ssow; slot++) { 1346 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); 1347 rsp->ssow_msixoff[slot] = 1348 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); 1349 } 1350 1351 rsp->timlfs = pfvf->timlfs; 1352 for (slot = 0; slot < rsp->timlfs; slot++) { 1353 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot); 1354 rsp->timlf_msixoff[slot] = 1355 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); 1356 } 1357 1358 rsp->cptlfs = pfvf->cptlfs; 1359 for (slot = 0; slot < rsp->cptlfs; slot++) { 1360 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); 1361 rsp->cptlf_msixoff[slot] = 1362 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); 1363 } 1364 return 0; 1365 } 1366 1367 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, 1368 struct msg_rsp *rsp) 1369 { 1370 u16 pcifunc = req->hdr.pcifunc; 1371 u16 vf, numvfs; 1372 u64 cfg; 1373 1374 vf = pcifunc & RVU_PFVF_FUNC_MASK; 1375 cfg = rvu_read64(rvu, BLKADDR_RVUM, 1376 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); 1377 numvfs = (cfg >> 12) & 0xFF; 1378 1379 if (vf && vf <= numvfs) 1380 __rvu_flr_handler(rvu, pcifunc); 1381 else 1382 return RVU_INVALID_VF_ID; 1383 1384 return 0; 1385 } 1386 1387 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, 1388 struct get_hw_cap_rsp *rsp) 1389 { 1390 struct rvu_hwinfo *hw = rvu->hw; 1391 1392 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; 1393 rsp->nix_shaping = hw->cap.nix_shaping; 1394 1395 return 0; 1396 } 1397 1398 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, 1399 struct mbox_msghdr *req) 1400 { 1401 struct rvu *rvu = pci_get_drvdata(mbox->pdev); 1402 1403 /* Check if valid, if not reply with a invalid msg */ 1404 if (req->sig != OTX2_MBOX_REQ_SIG) 1405 goto bad_message; 1406 1407 switch (req->id) { 1408 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 1409 case _id: { \ 1410 struct _rsp_type *rsp; \ 1411 int err; \ 1412 \ 1413 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ 1414 mbox, devid, \ 1415 sizeof(struct _rsp_type)); \ 1416 /* some handlers should complete even if reply */ \ 1417 /* could not be allocated */ \ 1418 if (!rsp && \ 1419 _id != MBOX_MSG_DETACH_RESOURCES && \ 1420 _id != MBOX_MSG_NIX_TXSCH_FREE && \ 1421 _id != MBOX_MSG_VF_FLR) \ 1422 return -ENOMEM; \ 1423 if (rsp) { \ 1424 rsp->hdr.id = _id; \ 1425 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ 1426 rsp->hdr.pcifunc = req->pcifunc; \ 1427 rsp->hdr.rc = 0; \ 1428 } \ 1429 \ 1430 err = rvu_mbox_handler_ ## _fn_name(rvu, \ 1431 (struct _req_type *)req, \ 1432 rsp); \ 1433 if (rsp && err) \ 1434 rsp->hdr.rc = err; \ 1435 \ 1436 return rsp ? 
err : -ENOMEM; \ 1437 } 1438 MBOX_MESSAGES 1439 #undef M 1440 1441 bad_message: 1442 default: 1443 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); 1444 return -ENODEV; 1445 } 1446 } 1447 1448 static void __rvu_mbox_handler(struct rvu_work *mwork, int type) 1449 { 1450 struct rvu *rvu = mwork->rvu; 1451 int offset, err, id, devid; 1452 struct otx2_mbox_dev *mdev; 1453 struct mbox_hdr *req_hdr; 1454 struct mbox_msghdr *msg; 1455 struct mbox_wq_info *mw; 1456 struct otx2_mbox *mbox; 1457 1458 switch (type) { 1459 case TYPE_AFPF: 1460 mw = &rvu->afpf_wq_info; 1461 break; 1462 case TYPE_AFVF: 1463 mw = &rvu->afvf_wq_info; 1464 break; 1465 default: 1466 return; 1467 } 1468 1469 devid = mwork - mw->mbox_wrk; 1470 mbox = &mw->mbox; 1471 mdev = &mbox->dev[devid]; 1472 1473 /* Process received mbox messages */ 1474 req_hdr = mdev->mbase + mbox->rx_start; 1475 if (mw->mbox_wrk[devid].num_msgs == 0) 1476 return; 1477 1478 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); 1479 1480 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { 1481 msg = mdev->mbase + offset; 1482 1483 /* Set which PF/VF sent this message based on mbox IRQ */ 1484 switch (type) { 1485 case TYPE_AFPF: 1486 msg->pcifunc &= 1487 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); 1488 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); 1489 break; 1490 case TYPE_AFVF: 1491 msg->pcifunc &= 1492 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); 1493 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; 1494 break; 1495 } 1496 1497 err = rvu_process_mbox_msg(mbox, devid, msg); 1498 if (!err) { 1499 offset = mbox->rx_start + msg->next_msgoff; 1500 continue; 1501 } 1502 1503 if (msg->pcifunc & RVU_PFVF_FUNC_MASK) 1504 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", 1505 err, otx2_mbox_id2name(msg->id), 1506 msg->id, rvu_get_pf(msg->pcifunc), 1507 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); 1508 else 1509 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", 1510 err, otx2_mbox_id2name(msg->id), 1511 msg->id, devid); 1512 } 1513 mw->mbox_wrk[devid].num_msgs = 0; 1514 1515 /* Send mbox responses to VF/PF */ 1516 otx2_mbox_msg_send(mbox, devid); 1517 } 1518 1519 static inline void rvu_afpf_mbox_handler(struct work_struct *work) 1520 { 1521 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1522 1523 __rvu_mbox_handler(mwork, TYPE_AFPF); 1524 } 1525 1526 static inline void rvu_afvf_mbox_handler(struct work_struct *work) 1527 { 1528 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1529 1530 __rvu_mbox_handler(mwork, TYPE_AFVF); 1531 } 1532 1533 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) 1534 { 1535 struct rvu *rvu = mwork->rvu; 1536 struct otx2_mbox_dev *mdev; 1537 struct mbox_hdr *rsp_hdr; 1538 struct mbox_msghdr *msg; 1539 struct mbox_wq_info *mw; 1540 struct otx2_mbox *mbox; 1541 int offset, id, devid; 1542 1543 switch (type) { 1544 case TYPE_AFPF: 1545 mw = &rvu->afpf_wq_info; 1546 break; 1547 case TYPE_AFVF: 1548 mw = &rvu->afvf_wq_info; 1549 break; 1550 default: 1551 return; 1552 } 1553 1554 devid = mwork - mw->mbox_wrk_up; 1555 mbox = &mw->mbox_up; 1556 mdev = &mbox->dev[devid]; 1557 1558 rsp_hdr = mdev->mbase + mbox->rx_start; 1559 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { 1560 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); 1561 return; 1562 } 1563 1564 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 1565 1566 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { 1567 
msg = mdev->mbase + offset; 1568 1569 if (msg->id >= MBOX_MSG_MAX) { 1570 dev_err(rvu->dev, 1571 "Mbox msg with unknown ID 0x%x\n", msg->id); 1572 goto end; 1573 } 1574 1575 if (msg->sig != OTX2_MBOX_RSP_SIG) { 1576 dev_err(rvu->dev, 1577 "Mbox msg with wrong signature %x, ID 0x%x\n", 1578 msg->sig, msg->id); 1579 goto end; 1580 } 1581 1582 switch (msg->id) { 1583 case MBOX_MSG_CGX_LINK_EVENT: 1584 break; 1585 default: 1586 if (msg->rc) 1587 dev_err(rvu->dev, 1588 "Mbox msg response has err %d, ID 0x%x\n", 1589 msg->rc, msg->id); 1590 break; 1591 } 1592 end: 1593 offset = mbox->rx_start + msg->next_msgoff; 1594 mdev->msgs_acked++; 1595 } 1596 mw->mbox_wrk_up[devid].up_num_msgs = 0; 1597 1598 otx2_mbox_reset(mbox, devid); 1599 } 1600 1601 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) 1602 { 1603 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1604 1605 __rvu_mbox_up_handler(mwork, TYPE_AFPF); 1606 } 1607 1608 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) 1609 { 1610 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1611 1612 __rvu_mbox_up_handler(mwork, TYPE_AFVF); 1613 } 1614 1615 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 1616 int type, int num, 1617 void (mbox_handler)(struct work_struct *), 1618 void (mbox_up_handler)(struct work_struct *)) 1619 { 1620 void __iomem *hwbase = NULL, *reg_base; 1621 int err, i, dir, dir_up; 1622 struct rvu_work *mwork; 1623 const char *name; 1624 u64 bar4_addr; 1625 1626 switch (type) { 1627 case TYPE_AFPF: 1628 name = "rvu_afpf_mailbox"; 1629 bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); 1630 dir = MBOX_DIR_AFPF; 1631 dir_up = MBOX_DIR_AFPF_UP; 1632 reg_base = rvu->afreg_base; 1633 break; 1634 case TYPE_AFVF: 1635 name = "rvu_afvf_mailbox"; 1636 bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); 1637 dir = MBOX_DIR_PFVF; 1638 dir_up = MBOX_DIR_PFVF_UP; 1639 reg_base = rvu->pfreg_base; 1640 break; 1641 default: 1642 return -EINVAL; 1643 } 1644 1645 mw->mbox_wq = alloc_workqueue(name, 1646 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1647 num); 1648 if (!mw->mbox_wq) 1649 return -ENOMEM; 1650 1651 mw->mbox_wrk = devm_kcalloc(rvu->dev, num, 1652 sizeof(struct rvu_work), GFP_KERNEL); 1653 if (!mw->mbox_wrk) { 1654 err = -ENOMEM; 1655 goto exit; 1656 } 1657 1658 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, 1659 sizeof(struct rvu_work), GFP_KERNEL); 1660 if (!mw->mbox_wrk_up) { 1661 err = -ENOMEM; 1662 goto exit; 1663 } 1664 1665 /* Mailbox is a reserved memory (in RAM) region shared between 1666 * RVU devices, shouldn't be mapped as device memory to allow 1667 * unaligned accesses. 
1668 */ 1669 hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); 1670 if (!hwbase) { 1671 dev_err(rvu->dev, "Unable to map mailbox region\n"); 1672 err = -ENOMEM; 1673 goto exit; 1674 } 1675 1676 err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); 1677 if (err) 1678 goto exit; 1679 1680 err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, 1681 reg_base, dir_up, num); 1682 if (err) 1683 goto exit; 1684 1685 for (i = 0; i < num; i++) { 1686 mwork = &mw->mbox_wrk[i]; 1687 mwork->rvu = rvu; 1688 INIT_WORK(&mwork->work, mbox_handler); 1689 1690 mwork = &mw->mbox_wrk_up[i]; 1691 mwork->rvu = rvu; 1692 INIT_WORK(&mwork->work, mbox_up_handler); 1693 } 1694 1695 return 0; 1696 exit: 1697 if (hwbase) 1698 iounmap((void __iomem *)hwbase); 1699 destroy_workqueue(mw->mbox_wq); 1700 return err; 1701 } 1702 1703 static void rvu_mbox_destroy(struct mbox_wq_info *mw) 1704 { 1705 if (mw->mbox_wq) { 1706 flush_workqueue(mw->mbox_wq); 1707 destroy_workqueue(mw->mbox_wq); 1708 mw->mbox_wq = NULL; 1709 } 1710 1711 if (mw->mbox.hwbase) 1712 iounmap((void __iomem *)mw->mbox.hwbase); 1713 1714 otx2_mbox_destroy(&mw->mbox); 1715 otx2_mbox_destroy(&mw->mbox_up); 1716 } 1717 1718 static void rvu_queue_work(struct mbox_wq_info *mw, int first, 1719 int mdevs, u64 intr) 1720 { 1721 struct otx2_mbox_dev *mdev; 1722 struct otx2_mbox *mbox; 1723 struct mbox_hdr *hdr; 1724 int i; 1725 1726 for (i = first; i < mdevs; i++) { 1727 /* start from 0 */ 1728 if (!(intr & BIT_ULL(i - first))) 1729 continue; 1730 1731 mbox = &mw->mbox; 1732 mdev = &mbox->dev[i]; 1733 hdr = mdev->mbase + mbox->rx_start; 1734 1735 /*The hdr->num_msgs is set to zero immediately in the interrupt 1736 * handler to ensure that it holds a correct value next time 1737 * when the interrupt handler is called. 1738 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler 1739 * pf>mbox.up_num_msgs holds the data for use in 1740 * pfaf_mbox_up_handler. 
1741 */ 1742 1743 if (hdr->num_msgs) { 1744 mw->mbox_wrk[i].num_msgs = hdr->num_msgs; 1745 hdr->num_msgs = 0; 1746 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); 1747 } 1748 mbox = &mw->mbox_up; 1749 mdev = &mbox->dev[i]; 1750 hdr = mdev->mbase + mbox->rx_start; 1751 if (hdr->num_msgs) { 1752 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; 1753 hdr->num_msgs = 0; 1754 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); 1755 } 1756 } 1757 } 1758 1759 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) 1760 { 1761 struct rvu *rvu = (struct rvu *)rvu_irq; 1762 int vfs = rvu->vfs; 1763 u64 intr; 1764 1765 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); 1766 /* Clear interrupts */ 1767 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); 1768 1769 /* Sync with mbox memory region */ 1770 rmb(); 1771 1772 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); 1773 1774 /* Handle VF interrupts */ 1775 if (vfs > 64) { 1776 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); 1777 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); 1778 1779 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); 1780 vfs -= 64; 1781 } 1782 1783 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); 1784 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); 1785 1786 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); 1787 1788 return IRQ_HANDLED; 1789 } 1790 1791 static void rvu_enable_mbox_intr(struct rvu *rvu) 1792 { 1793 struct rvu_hwinfo *hw = rvu->hw; 1794 1795 /* Clear spurious irqs, if any */ 1796 rvu_write64(rvu, BLKADDR_RVUM, 1797 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); 1798 1799 /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ 1800 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, 1801 INTR_MASK(hw->total_pfs) & ~1ULL); 1802 } 1803 1804 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) 1805 { 1806 struct rvu_block *block; 1807 int slot, lf, num_lfs; 1808 int err; 1809 1810 block = &rvu->hw->block[blkaddr]; 1811 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), 1812 block->type); 1813 if (!num_lfs) 1814 return; 1815 for (slot = 0; slot < num_lfs; slot++) { 1816 lf = rvu_get_lf(rvu, block, pcifunc, slot); 1817 if (lf < 0) 1818 continue; 1819 1820 /* Cleanup LF and reset it */ 1821 if (block->addr == BLKADDR_NIX0) 1822 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); 1823 else if (block->addr == BLKADDR_NPA) 1824 rvu_npa_lf_teardown(rvu, pcifunc, lf); 1825 1826 err = rvu_lf_reset(rvu, block, lf); 1827 if (err) { 1828 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", 1829 block->addr, lf); 1830 } 1831 } 1832 } 1833 1834 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) 1835 { 1836 mutex_lock(&rvu->flr_lock); 1837 /* Reset order should reflect inter-block dependencies: 1838 * 1. Reset any packet/work sources (NIX, CPT, TIM) 1839 * 2. Flush and reset SSO/SSOW 1840 * 3. 
Cleanup pools (NPA) 1841 */ 1842 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); 1843 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); 1844 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); 1845 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); 1846 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); 1847 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); 1848 rvu_detach_rsrcs(rvu, NULL, pcifunc); 1849 mutex_unlock(&rvu->flr_lock); 1850 } 1851 1852 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) 1853 { 1854 int reg = 0; 1855 1856 /* pcifunc = 0(PF0) | (vf + 1) */ 1857 __rvu_flr_handler(rvu, vf + 1); 1858 1859 if (vf >= 64) { 1860 reg = 1; 1861 vf = vf - 64; 1862 } 1863 1864 /* Signal FLR finish and enable IRQ */ 1865 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); 1866 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); 1867 } 1868 1869 static void rvu_flr_handler(struct work_struct *work) 1870 { 1871 struct rvu_work *flrwork = container_of(work, struct rvu_work, work); 1872 struct rvu *rvu = flrwork->rvu; 1873 u16 pcifunc, numvfs, vf; 1874 u64 cfg; 1875 int pf; 1876 1877 pf = flrwork - rvu->flr_wrk; 1878 if (pf >= rvu->hw->total_pfs) { 1879 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); 1880 return; 1881 } 1882 1883 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 1884 numvfs = (cfg >> 12) & 0xFF; 1885 pcifunc = pf << RVU_PFVF_PF_SHIFT; 1886 1887 for (vf = 0; vf < numvfs; vf++) 1888 __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); 1889 1890 __rvu_flr_handler(rvu, pcifunc); 1891 1892 /* Signal FLR finish */ 1893 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); 1894 1895 /* Enable interrupt */ 1896 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); 1897 } 1898 1899 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) 1900 { 1901 int dev, vf, reg = 0; 1902 u64 intr; 1903 1904 if (start_vf >= 64) 1905 reg = 1; 1906 1907 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); 1908 if (!intr) 1909 return; 1910 1911 for (vf = 0; vf < numvfs; vf++) { 1912 if (!(intr & BIT_ULL(vf))) 1913 continue; 1914 dev = vf + start_vf + rvu->hw->total_pfs; 1915 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); 1916 /* Clear and disable the interrupt */ 1917 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); 1918 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); 1919 } 1920 } 1921 1922 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) 1923 { 1924 struct rvu *rvu = (struct rvu *)rvu_irq; 1925 u64 intr; 1926 u8 pf; 1927 1928 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); 1929 if (!intr) 1930 goto afvf_flr; 1931 1932 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 1933 if (intr & (1ULL << pf)) { 1934 /* PF is already dead do only AF related operations */ 1935 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); 1936 /* clear interrupt */ 1937 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, 1938 BIT_ULL(pf)); 1939 /* Disable the interrupt */ 1940 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, 1941 BIT_ULL(pf)); 1942 } 1943 } 1944 1945 afvf_flr: 1946 rvu_afvf_queue_flr_work(rvu, 0, 64); 1947 if (rvu->vfs > 64) 1948 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); 1949 1950 return IRQ_HANDLED; 1951 } 1952 1953 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) 1954 { 1955 int vf; 1956 1957 /* Nothing to be done here other than clearing the 1958 * TRPEND bit. 
1959 */ 1960 for (vf = 0; vf < 64; vf++) { 1961 if (intr & (1ULL << vf)) { 1962 /* clear the trpend due to ME(master enable) */ 1963 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); 1964 /* clear interrupt */ 1965 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); 1966 } 1967 } 1968 } 1969 1970 /* Handles ME interrupts from VFs of AF */ 1971 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq) 1972 { 1973 struct rvu *rvu = (struct rvu *)rvu_irq; 1974 int vfset; 1975 u64 intr; 1976 1977 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); 1978 1979 for (vfset = 0; vfset <= 1; vfset++) { 1980 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); 1981 if (intr) 1982 rvu_me_handle_vfset(rvu, vfset, intr); 1983 } 1984 1985 return IRQ_HANDLED; 1986 } 1987 1988 /* Handles ME interrupts from PFs */ 1989 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq) 1990 { 1991 struct rvu *rvu = (struct rvu *)rvu_irq; 1992 u64 intr; 1993 u8 pf; 1994 1995 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); 1996 1997 /* Nothing to be done here other than clearing the 1998 * TRPEND bit. 1999 */ 2000 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2001 if (intr & (1ULL << pf)) { 2002 /* clear the trpend due to ME(master enable) */ 2003 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, 2004 BIT_ULL(pf)); 2005 /* clear interrupt */ 2006 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, 2007 BIT_ULL(pf)); 2008 } 2009 } 2010 2011 return IRQ_HANDLED; 2012 } 2013 2014 static void rvu_unregister_interrupts(struct rvu *rvu) 2015 { 2016 int irq; 2017 2018 /* Disable the Mbox interrupt */ 2019 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, 2020 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2021 2022 /* Disable the PF FLR interrupt */ 2023 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, 2024 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2025 2026 /* Disable the PF ME interrupt */ 2027 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, 2028 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2029 2030 for (irq = 0; irq < rvu->num_vec; irq++) { 2031 if (rvu->irq_allocated[irq]) 2032 free_irq(pci_irq_vector(rvu->pdev, irq), rvu); 2033 } 2034 2035 pci_free_irq_vectors(rvu->pdev); 2036 rvu->num_vec = 0; 2037 } 2038 2039 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) 2040 { 2041 struct rvu_pfvf *pfvf = &rvu->pf[0]; 2042 int offset; 2043 2044 pfvf = &rvu->pf[0]; 2045 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; 2046 2047 /* Make sure there are enough MSIX vectors configured so that 2048 * VF interrupts can be handled. Offset equal to zero means 2049 * that PF vectors are not configured and overlapping AF vectors. 
static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlap AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Clear any pending FLR interrupts and enable them for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
		    INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear any pending ME interrupts and enable them for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
		    INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;
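
	/* The remaining handlers service the AF's own VFs, which signal
	 * the AF through the PF-side vectors (VFPF mailbox, VF FLR and
	 * VF ME) starting at pf_vec_start.  Each source has two vectors:
	 * index 0 covers VFs 0-63 and index 1 covers VFs 64 and above.
	 */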
	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;
	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		flush_workqueue(rvu->flr_wq);
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}

static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}
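
/* The AF-VF interrupt sources (mailbox, FLR and ME) are split across
 * two 64-bit register sets: index 0 serves VFs 0-63 and index 1 serves
 * any VFs beyond that, which is why the helpers below touch the second
 * set only when more than 64 VFs are in use.
 */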
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR and ME */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

#define PCI_DEVID_OCTEONTX2_LBK 0xA061

static int lbk_get_num_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
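
/* SR-IOV bring-up for the AF's own VFs: check that PF0 has usable
 * MSI-X space, query the LBK channel count and cap the VF count to it
 * (the assumption being that each AF VF is backed by one LBK channel),
 * set up the AF<->VF mailbox, enable the AF-VF interrupts and only then
 * create the VFs, since they may start signalling the AF immediately.
 */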
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = lbk_get_num_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* Save the number of VFs for reference in the VF interrupt handlers.
	 * Since interrupts might start arriving during SRIOV enablement,
	 * the ordinary API cannot be used to get the number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
}

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set DMA mask\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set consistent DMA mask\n");
		goto err_release_regions;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_release_regions;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_irq;

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	return 0;
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
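
/* Teardown mirrors rvu_probe() in reverse: debugfs, interrupts, the FLR
 * workqueue, CGX bindings, the AF-PF mailbox and SR-IOV are released
 * before the hardware blocks are reset and the PCI resources freed.
 */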
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);