// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#define	DRV_NAME	"octeontx2-af"
#define	DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
#define	DRV_VERSION	"1.0"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);

/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	void __iomem *reg;
	int timeout = 100;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
	while (timeout) {
		reg_val = readq(reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1, 2);
		timeout--;
	}
	return -EBUSY;
}

int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

static int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

static bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}
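
/* Illustrative sketch (not called anywhere in this file) of how the resource
 * bitmap helpers above fit together; 'max' must be set (e.g. from a block's
 * *_CONST register) before rvu_alloc_bitmap() is called:
 *
 *	struct rsrc_bmap lfs = { .max = 128 };		// example size only
 *	int lf, start;
 *
 *	if (!rvu_alloc_bitmap(&lfs)) {
 *		lf = rvu_alloc_rsrc(&lfs);		// one LF
 *		start = rvu_alloc_rsrc_contig(&lfs, 4);	// 4 contiguous LFs
 *		if (lf >= 0)
 *			rvu_free_rsrc(&lfs, lf);
 *		if (start >= 0)
 *			rvu_free_rsrc_contig(&lfs, 4, start);
 *		kfree(lfs.bmap);
 *	}
 */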

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	spin_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				spin_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	spin_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX0;
	}

	/* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT0;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->type) {
	case BLKTYPE_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKTYPE_NIX:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKTYPE_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKTYPE_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKTYPE_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKTYPE_CPT:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
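
/* Note: throughout this file a 'pcifunc' is the RVU PF_FUNC value, i.e. the
 * PF number in the upper bits (RVU_PFVF_PF_SHIFT/RVU_PFVF_PF_MASK) and the
 * function number in the low RVU_PFVF_FUNC_MASK bits. Function 0 is the PF
 * itself and function N (N >= 1) is that PF's VF(N - 1), which is why
 * rvu_get_hwvf() subtracts one from 'func'. For example, PF2's first VF is
 * (2 << RVU_PFVF_PF_SHIFT) | 1.
 */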

inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	*numvfs = (cfg >> 12) & 0xFF;
	*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static void
rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;

	return 0;
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);
}
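
/* Discover what the silicon provides and set up the AF's bookkeeping:
 * read PF/VF counts from RVU_PRIV_CONST, then for every implemented block
 * record its max LF count (from the block's *_CONST register) and its
 * per-LF config/MSIX register offsets, and allocate its LF bitmap. Finally
 * allocate PF/VF state, set up MSIX resources and scan for LFs that
 * firmware has already provisioned.
 */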
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	/* Init NIX LF's bitmap */
	block = &hw->block[BLKADDR_NIX0];
	if (!block->implemented)
		goto sso;
	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = BLKADDR_NIX0;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

sso:
	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	/* Init CPT LF's bitmap */
	block = &hw->block[BLKADDR_CPT0];
	if (!block->implemented)
		goto init;
	cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = BLKADDR_CPT0;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

init:
	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	spin_lock_init(&rvu->rsrc_lock);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map)
			return -ENOMEM;

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	return 0;
}

static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
				  struct ready_msg_rsp *rsp)
{
	return 0;
}

/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return pfvf->sso;
	case BLKTYPE_SSOW:
		return pfvf->ssow;
	case BLKTYPE_TIM:
		return pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs;
	}
	return 0;
}
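
/* Look up which LF of 'block' backs a given (pcifunc, slot) pair, using the
 * block's *_RVU_LF_CFG_DEBUG register: write the PF_FUNC and slot with the
 * trigger bit (bit 13) set, wait for HW to clear that bit, then read back
 * the result; bit 12 indicates a valid mapping and the low 12 bits hold the
 * LF number.
 */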
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}

static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool is_pf, detach_all = true;
	struct rvu_block *block;
	int devnum, blkid;

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	spin_lock(&rvu->rsrc_lock);

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	spin_unlock(&rvu->rsrc_lock);
	return 0;
}

static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
					     struct rsrc_detach *detach,
					     struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
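
/* Attach 'num_lfs' LFs of the given block type to 'pcifunc'. Each LF taken
 * from the block's bitmap is bound by writing its per-LF config register
 * with bit 63 set (LF in use), the owning PF_FUNC at bits [23:8] and the
 * slot number in the low bits; this is the same encoding rvu_scan_block()
 * decodes for LFs that firmware provisioned before the driver loaded.
 */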
static void rvu_attach_block(struct rvu *rvu, int pcifunc,
			     int blktype, int num_lfs)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int free_lfs, mappedlfs;

	/* Only one NPA LF can be attached */
	if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
		block = &hw->block[BLKADDR_NIX0];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		block = &hw->block[BLKADDR_CPT0];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}

static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
					     struct rsrc_attach *attach,
					     struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int devnum, err;
	bool is_pf;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	spin_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);

	if (attach->sso) {
		/* A RVU func doesn't know which exact LF or slot is attached
		 * to it; it always sees them as slots 0, 1, 2 and so on.
		 * So for a 'modify' request, simply detach all existing
		 * attached LFs/slots and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
	}

	if (attach->cptlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
	}

exit:
	spin_unlock(&rvu->rsrc_lock);
	return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
					struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
	rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}
	return 0;
}
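
/* Dispatch one mailbox request to its rvu_mbox_handler_<NAME>() routine.
 * The M() macro below is expanded once per entry of the MBOX_MESSAGES list
 * (defined with the mailbox message declarations); for illustration, an
 * entry like M(READY, id, msg_req, ready_msg_rsp) becomes a 'case id:' that
 * allocates a ready_msg_rsp in the response queue, fills its header, calls
 * rvu_mbox_handler_READY() and stores that handler's return code in
 * rsp->hdr.rc.
 */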
static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
				struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _req_type, _rsp_type)				\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&rvu->mbox, devid,				\
			sizeof(struct _rsp_type));			\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _name(rvu,			\
						 (struct _req_type *)req, \
						 rsp);			\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M
		break;
bad_message:
	default:
		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
				       req->id);
		return -ENODEV;
	}
}

static void rvu_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, id, err;
	u16 pf;

	mbox = &rvu->mbox;
	pf = mwork - rvu->mbox_wrk;
	mdev = &mbox->dev[pf];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < req_hdr->num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF sent this message based on mbox IRQ */
		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
		err = rvu_process_mbox_msg(rvu, pf, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
	}

	/* Send mbox responses to PF */
	otx2_mbox_msg_send(mbox, pf);
}
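
/* Set up the AF <=> PF mailbox: one work item per PF and a single shared
 * BAR4 window (at RVU_AF_PF_BAR4_ADDR) of which each PF owns one MBOX_SIZE
 * slice. The window is mapped write-combining instead of as device memory
 * so unaligned accesses to message payloads are allowed.
 */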
static int rvu_mbox_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	void __iomem *hwbase = NULL;
	struct rvu_work *mwork;
	u64 bar4_addr;
	int err, pf;

	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				       hw->total_pfs);
	if (!rvu->mbox_wq)
		return -ENOMEM;

	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
				     sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	/* Map mbox region shared with PFs */
	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
	/* Mailbox is a reserved memory (in RAM) region shared between
	 * RVU devices, shouldn't be mapped as device memory to allow
	 * unaligned accesses.
	 */
	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
	if (!hwbase) {
		dev_err(rvu->dev, "Unable to map mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
			     MBOX_DIR_AFPF, hw->total_pfs);
	if (err)
		goto exit;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		mwork = &rvu->mbox_wrk[pf];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, rvu_mbox_handler);
	}

	return 0;
exit:
	if (hwbase)
		iounmap((void __iomem *)hwbase);
	destroy_workqueue(rvu->mbox_wq);
	return err;
}

static void rvu_mbox_destroy(struct rvu *rvu)
{
	if (rvu->mbox_wq) {
		flush_workqueue(rvu->mbox_wq);
		destroy_workqueue(rvu->mbox_wq);
		rvu->mbox_wq = NULL;
	}

	if (rvu->mbox.hwbase)
		iounmap((void __iomem *)rvu->mbox.hwbase);

	otx2_mbox_destroy(&rvu->mbox);
}

static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);

	/* Sync with mbox memory region */
	smp_wmb();

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			mbox = &rvu->mbox;
			mdev = &mbox->dev[pf];
			hdr = mdev->mbase + mbox->rx_start;
			if (hdr->num_msgs)
				queue_work(rvu->mbox_wq,
					   &rvu->mbox_wrk[pf].work);
		}
	}

	return IRQ_HANDLED;
}

static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq])
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	return 0;

fail:
	pci_free_irq_vectors(rvu->pdev);
	return ret;
}
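
/* Device init: map the AF and PF register BARs, discover and reset all RVU
 * blocks, build the resource maps, bring up the AF-PF mailbox and the CGX
 * (MAC) handling, and finally register the mailbox interrupt.
 */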
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set DMA mask\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set consistent DMA mask\n");
		goto err_release_regions;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_release_regions;

	err = rvu_mbox_init(rvu);
	if (err)
		goto err_hwsetup;

	err = rvu_cgx_probe(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_cgx;

	return 0;
err_cgx:
	rvu_cgx_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(rvu);
err_hwsetup:
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_unregister_interrupts(rvu);
	rvu_cgx_wq_destroy(rvu);
	rvu_mbox_destroy(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);