// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
enum {
	TYPE_AFVF,
	TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");

static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->rvu = rvu;

	if (is_rvu_96xx_B0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}

/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(10000);
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	return -EBUSY;
}

int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

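	/* Mark this entry as in use before returning the allocated index */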
	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc if LFs are attached from multiple blocks of same type, then
 * return blkaddr of first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			      RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			      RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			      RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			      RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	*numvfs = (cfg >> 12) & 0xFF;
	*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}

static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}

static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;

		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}

static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}

static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}

static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
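	/* Read CPT_AF_CONSTANTS0 to get the max LF count for this block */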
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}

static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err)
		return err;
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err)
		return err;

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err)
		return err;
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err)
		return err;

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err)
		goto npc_err;

	err = rvu_cgx_init(rvu);
	if (err)
		goto cgx_err;

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err)
		goto npa_err;

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err)
		goto nix_err;

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}

int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}

/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}

static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}

static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered VFs get NIX1
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}

static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
				BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}

static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}

static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}

int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}

int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}

int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;

	return 0;
}

static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}

static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}

static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}

static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
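		/* Pick up the response message queued at the current offset */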
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}

static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}

static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follow after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
						  MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	void **mbox_regions;
	const char *name;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
		if (err)
			goto free_regions;
		break;
	default:
		return err;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	kfree(mbox_regions);
	return 0;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
	return err;
}

static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		flush_workqueue(mw->mbox_wq);
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}

static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler so that it holds a correct value the next time the
		 * interrupt handler is called.
		 * mw->mbox_wrk[i].num_msgs holds the count for use in the mbox
		 * handler and mw->mbox_wrk_up[i].up_num_msgs holds the count
		 * for use in the mbox_up handler.
		 */

		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}

static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}

static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}

static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
		}
	}
}

static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}

static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}

static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
	}
}

static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* PF is already dead, do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}

static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}

static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and are overlapping the
	 * AF vectors.
2490 */ 2491 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && 2492 offset; 2493 } 2494 2495 static int rvu_register_interrupts(struct rvu *rvu) 2496 { 2497 int ret, offset, pf_vec_start; 2498 2499 rvu->num_vec = pci_msix_vec_count(rvu->pdev); 2500 2501 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, 2502 NAME_SIZE, GFP_KERNEL); 2503 if (!rvu->irq_name) 2504 return -ENOMEM; 2505 2506 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, 2507 sizeof(bool), GFP_KERNEL); 2508 if (!rvu->irq_allocated) 2509 return -ENOMEM; 2510 2511 /* Enable MSI-X */ 2512 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, 2513 rvu->num_vec, PCI_IRQ_MSIX); 2514 if (ret < 0) { 2515 dev_err(rvu->dev, 2516 "RVUAF: Request for %d msix vectors failed, ret %d\n", 2517 rvu->num_vec, ret); 2518 return ret; 2519 } 2520 2521 /* Register mailbox interrupt handler */ 2522 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); 2523 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), 2524 rvu_mbox_intr_handler, 0, 2525 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); 2526 if (ret) { 2527 dev_err(rvu->dev, 2528 "RVUAF: IRQ registration failed for mbox irq\n"); 2529 goto fail; 2530 } 2531 2532 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; 2533 2534 /* Enable mailbox interrupts from all PFs */ 2535 rvu_enable_mbox_intr(rvu); 2536 2537 /* Register FLR interrupt handler */ 2538 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], 2539 "RVUAF FLR"); 2540 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), 2541 rvu_flr_intr_handler, 0, 2542 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], 2543 rvu); 2544 if (ret) { 2545 dev_err(rvu->dev, 2546 "RVUAF: IRQ registration failed for FLR\n"); 2547 goto fail; 2548 } 2549 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; 2550 2551 /* Enable FLR interrupt for all PFs*/ 2552 rvu_write64(rvu, BLKADDR_RVUM, 2553 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); 2554 2555 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, 2556 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2557 2558 /* Register ME interrupt handler */ 2559 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], 2560 "RVUAF ME"); 2561 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), 2562 rvu_me_pf_intr_handler, 0, 2563 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], 2564 rvu); 2565 if (ret) { 2566 dev_err(rvu->dev, 2567 "RVUAF: IRQ registration failed for ME\n"); 2568 } 2569 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; 2570 2571 /* Clear TRPEND bit for all PF */ 2572 rvu_write64(rvu, BLKADDR_RVUM, 2573 RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs)); 2574 /* Enable ME interrupt for all PFs*/ 2575 rvu_write64(rvu, BLKADDR_RVUM, 2576 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); 2577 2578 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, 2579 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2580 2581 if (!rvu_afvf_msix_vectors_num_ok(rvu)) 2582 return 0; 2583 2584 /* Get PF MSIX vectors offset. */ 2585 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, 2586 RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; 2587 2588 /* Register MBOX0 interrupt. 
*/ 2589 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; 2590 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); 2591 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2592 rvu_mbox_intr_handler, 0, 2593 &rvu->irq_name[offset * NAME_SIZE], 2594 rvu); 2595 if (ret) 2596 dev_err(rvu->dev, 2597 "RVUAF: IRQ registration failed for Mbox0\n"); 2598 2599 rvu->irq_allocated[offset] = true; 2600 2601 /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so 2602 * simply increment current offset by 1. 2603 */ 2604 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; 2605 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); 2606 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2607 rvu_mbox_intr_handler, 0, 2608 &rvu->irq_name[offset * NAME_SIZE], 2609 rvu); 2610 if (ret) 2611 dev_err(rvu->dev, 2612 "RVUAF: IRQ registration failed for Mbox1\n"); 2613 2614 rvu->irq_allocated[offset] = true; 2615 2616 /* Register FLR interrupt handler for AF's VFs */ 2617 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; 2618 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); 2619 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2620 rvu_flr_intr_handler, 0, 2621 &rvu->irq_name[offset * NAME_SIZE], rvu); 2622 if (ret) { 2623 dev_err(rvu->dev, 2624 "RVUAF: IRQ registration failed for RVUAFVF FLR0\n"); 2625 goto fail; 2626 } 2627 rvu->irq_allocated[offset] = true; 2628 2629 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1; 2630 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); 2631 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2632 rvu_flr_intr_handler, 0, 2633 &rvu->irq_name[offset * NAME_SIZE], rvu); 2634 if (ret) { 2635 dev_err(rvu->dev, 2636 "RVUAF: IRQ registration failed for RVUAFVF FLR1\n"); 2637 goto fail; 2638 } 2639 rvu->irq_allocated[offset] = true; 2640 2641 /* Register ME interrupt handler for AF's VFs */ 2642 offset = pf_vec_start + RVU_PF_INT_VEC_VFME0; 2643 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); 2644 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2645 rvu_me_vf_intr_handler, 0, 2646 &rvu->irq_name[offset * NAME_SIZE], rvu); 2647 if (ret) { 2648 dev_err(rvu->dev, 2649 "RVUAF: IRQ registration failed for RVUAFVF ME0\n"); 2650 goto fail; 2651 } 2652 rvu->irq_allocated[offset] = true; 2653 2654 offset = pf_vec_start + RVU_PF_INT_VEC_VFME1; 2655 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); 2656 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2657 rvu_me_vf_intr_handler, 0, 2658 &rvu->irq_name[offset * NAME_SIZE], rvu); 2659 if (ret) { 2660 dev_err(rvu->dev, 2661 "RVUAF: IRQ registration failed for RVUAFVF ME1\n"); 2662 goto fail; 2663 } 2664 rvu->irq_allocated[offset] = true; 2665 return 0; 2666 2667 fail: 2668 rvu_unregister_interrupts(rvu); 2669 return ret; 2670 } 2671 2672 static void rvu_flr_wq_destroy(struct rvu *rvu) 2673 { 2674 if (rvu->flr_wq) { 2675 flush_workqueue(rvu->flr_wq); 2676 destroy_workqueue(rvu->flr_wq); 2677 rvu->flr_wq = NULL; 2678 } 2679 } 2680 2681 static int rvu_flr_init(struct rvu *rvu) 2682 { 2683 int dev, num_devs; 2684 u64 cfg; 2685 int pf; 2686 2687 /* Enable FLR for all PFs*/ 2688 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2689 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2690 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), 2691 cfg | BIT_ULL(22)); 2692 } 2693 2694 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", 2695 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 2696 1); 2697 if (!rvu->flr_wq) 2698 return -ENOMEM; 2699 2700 num_devs = rvu->hw->total_pfs + 
pci_sriov_get_totalvfs(rvu->pdev); 2701 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, 2702 sizeof(struct rvu_work), GFP_KERNEL); 2703 if (!rvu->flr_wrk) { 2704 destroy_workqueue(rvu->flr_wq); 2705 return -ENOMEM; 2706 } 2707 2708 for (dev = 0; dev < num_devs; dev++) { 2709 rvu->flr_wrk[dev].rvu = rvu; 2710 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); 2711 } 2712 2713 mutex_init(&rvu->flr_lock); 2714 2715 return 0; 2716 } 2717 2718 static void rvu_disable_afvf_intr(struct rvu *rvu) 2719 { 2720 int vfs = rvu->vfs; 2721 2722 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2723 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2724 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2725 if (vfs <= 64) 2726 return; 2727 2728 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), 2729 INTR_MASK(vfs - 64)); 2730 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 2731 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 2732 } 2733 2734 static void rvu_enable_afvf_intr(struct rvu *rvu) 2735 { 2736 int vfs = rvu->vfs; 2737 2738 /* Clear any pending interrupts and enable AF VF interrupts for 2739 * the first 64 VFs. 2740 */ 2741 /* Mbox */ 2742 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); 2743 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2744 2745 /* FLR */ 2746 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); 2747 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2748 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2749 2750 /* Same for remaining VFs, if any. */ 2751 if (vfs <= 64) 2752 return; 2753 2754 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); 2755 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), 2756 INTR_MASK(vfs - 64)); 2757 2758 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); 2759 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2760 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2761 } 2762 2763 int rvu_get_num_lbk_chans(void) 2764 { 2765 struct pci_dev *pdev; 2766 void __iomem *base; 2767 int ret = -EIO; 2768 2769 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, 2770 NULL); 2771 if (!pdev) 2772 goto err; 2773 2774 base = pci_ioremap_bar(pdev, 0); 2775 if (!base) 2776 goto err_put; 2777 2778 /* Read number of available LBK channels from LBK(0)_CONST register. */ 2779 ret = (readq(base + 0x10) >> 32) & 0xffff; 2780 iounmap(base); 2781 err_put: 2782 pci_dev_put(pdev); 2783 err: 2784 return ret; 2785 } 2786 2787 static int rvu_enable_sriov(struct rvu *rvu) 2788 { 2789 struct pci_dev *pdev = rvu->pdev; 2790 int err, chans, vfs; 2791 2792 if (!rvu_afvf_msix_vectors_num_ok(rvu)) { 2793 dev_warn(&pdev->dev, 2794 "Skipping SRIOV enablement since not enough IRQs are available\n"); 2795 return 0; 2796 } 2797 2798 chans = rvu_get_num_lbk_chans(); 2799 if (chans < 0) 2800 return chans; 2801 2802 vfs = pci_sriov_get_totalvfs(pdev); 2803 2804 /* Limit VFs in case we have more VFs than LBK channels available. */ 2805 if (vfs > chans) 2806 vfs = chans; 2807 2808 if (!vfs) 2809 return 0; 2810 2811 /* Save VFs number for reference in VF interrupts handlers. 2812 * Since interrupts might start arriving during SRIOV enablement 2813 * ordinary API cannot be used to get number of enabled VFs. 
2814 */ 2815 rvu->vfs = vfs; 2816 2817 err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, 2818 rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler); 2819 if (err) 2820 return err; 2821 2822 rvu_enable_afvf_intr(rvu); 2823 /* Make sure IRQs are enabled before SRIOV. */ 2824 mb(); 2825 2826 err = pci_enable_sriov(pdev, vfs); 2827 if (err) { 2828 rvu_disable_afvf_intr(rvu); 2829 rvu_mbox_destroy(&rvu->afvf_wq_info); 2830 return err; 2831 } 2832 2833 return 0; 2834 } 2835 2836 static void rvu_disable_sriov(struct rvu *rvu) 2837 { 2838 rvu_disable_afvf_intr(rvu); 2839 rvu_mbox_destroy(&rvu->afvf_wq_info); 2840 pci_disable_sriov(rvu->pdev); 2841 } 2842 2843 static void rvu_update_module_params(struct rvu *rvu) 2844 { 2845 const char *default_pfl_name = "default"; 2846 2847 strscpy(rvu->mkex_pfl_name, 2848 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN); 2849 strscpy(rvu->kpu_pfl_name, 2850 kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN); 2851 } 2852 2853 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2854 { 2855 struct device *dev = &pdev->dev; 2856 struct rvu *rvu; 2857 int err; 2858 2859 rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); 2860 if (!rvu) 2861 return -ENOMEM; 2862 2863 rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); 2864 if (!rvu->hw) { 2865 devm_kfree(dev, rvu); 2866 return -ENOMEM; 2867 } 2868 2869 pci_set_drvdata(pdev, rvu); 2870 rvu->pdev = pdev; 2871 rvu->dev = &pdev->dev; 2872 2873 err = pci_enable_device(pdev); 2874 if (err) { 2875 dev_err(dev, "Failed to enable PCI device\n"); 2876 goto err_freemem; 2877 } 2878 2879 err = pci_request_regions(pdev, DRV_NAME); 2880 if (err) { 2881 dev_err(dev, "PCI request regions failed 0x%x\n", err); 2882 goto err_disable_device; 2883 } 2884 2885 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 2886 if (err) { 2887 dev_err(dev, "DMA mask config failed, abort\n"); 2888 goto err_release_regions; 2889 } 2890 2891 pci_set_master(pdev); 2892 2893 rvu->ptp = ptp_get(); 2894 if (IS_ERR(rvu->ptp)) { 2895 err = PTR_ERR(rvu->ptp); 2896 if (err == -EPROBE_DEFER) 2897 goto err_release_regions; 2898 rvu->ptp = NULL; 2899 } 2900 2901 /* Map Admin function CSRs */ 2902 rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); 2903 rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); 2904 if (!rvu->afreg_base || !rvu->pfreg_base) { 2905 dev_err(dev, "Unable to map admin function CSRs, aborting\n"); 2906 err = -ENOMEM; 2907 goto err_put_ptp; 2908 } 2909 2910 /* Store module params in rvu structure */ 2911 rvu_update_module_params(rvu); 2912 2913 /* Check which blocks the HW supports */ 2914 rvu_check_block_implemented(rvu); 2915 2916 rvu_reset_all_blocks(rvu); 2917 2918 rvu_setup_hw_capabilities(rvu); 2919 2920 err = rvu_setup_hw_resources(rvu); 2921 if (err) 2922 goto err_put_ptp; 2923 2924 /* Init mailbox btw AF and PFs */ 2925 err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, 2926 rvu->hw->total_pfs, rvu_afpf_mbox_handler, 2927 rvu_afpf_mbox_up_handler); 2928 if (err) 2929 goto err_hwsetup; 2930 2931 err = rvu_flr_init(rvu); 2932 if (err) 2933 goto err_mbox; 2934 2935 err = rvu_register_interrupts(rvu); 2936 if (err) 2937 goto err_flr; 2938 2939 err = rvu_register_dl(rvu); 2940 if (err) 2941 goto err_irq; 2942 2943 rvu_setup_rvum_blk_revid(rvu); 2944 2945 /* Enable AF's VFs (if any) */ 2946 err = rvu_enable_sriov(rvu); 2947 if (err) 2948 goto err_dl; 2949 2950 /* Initialize debugfs */ 2951 rvu_dbg_init(rvu); 2952 2953 return 0; 2954 err_dl: 2955 
rvu_unregister_dl(rvu); 2956 err_irq: 2957 rvu_unregister_interrupts(rvu); 2958 err_flr: 2959 rvu_flr_wq_destroy(rvu); 2960 err_mbox: 2961 rvu_mbox_destroy(&rvu->afpf_wq_info); 2962 err_hwsetup: 2963 rvu_cgx_exit(rvu); 2964 rvu_fwdata_exit(rvu); 2965 rvu_reset_all_blocks(rvu); 2966 rvu_free_hw_resources(rvu); 2967 rvu_clear_rvum_blk_revid(rvu); 2968 err_put_ptp: 2969 ptp_put(rvu->ptp); 2970 err_release_regions: 2971 pci_release_regions(pdev); 2972 err_disable_device: 2973 pci_disable_device(pdev); 2974 err_freemem: 2975 pci_set_drvdata(pdev, NULL); 2976 devm_kfree(&pdev->dev, rvu->hw); 2977 devm_kfree(dev, rvu); 2978 return err; 2979 } 2980 2981 static void rvu_remove(struct pci_dev *pdev) 2982 { 2983 struct rvu *rvu = pci_get_drvdata(pdev); 2984 2985 rvu_dbg_exit(rvu); 2986 rvu_unregister_dl(rvu); 2987 rvu_unregister_interrupts(rvu); 2988 rvu_flr_wq_destroy(rvu); 2989 rvu_cgx_exit(rvu); 2990 rvu_fwdata_exit(rvu); 2991 rvu_mbox_destroy(&rvu->afpf_wq_info); 2992 rvu_disable_sriov(rvu); 2993 rvu_reset_all_blocks(rvu); 2994 rvu_free_hw_resources(rvu); 2995 rvu_clear_rvum_blk_revid(rvu); 2996 ptp_put(rvu->ptp); 2997 pci_release_regions(pdev); 2998 pci_disable_device(pdev); 2999 pci_set_drvdata(pdev, NULL); 3000 3001 devm_kfree(&pdev->dev, rvu->hw); 3002 devm_kfree(&pdev->dev, rvu); 3003 } 3004 3005 static struct pci_driver rvu_driver = { 3006 .name = DRV_NAME, 3007 .id_table = rvu_id_table, 3008 .probe = rvu_probe, 3009 .remove = rvu_remove, 3010 }; 3011 3012 static int __init rvu_init_module(void) 3013 { 3014 int err; 3015 3016 pr_info("%s: %s\n", DRV_NAME, DRV_STRING); 3017 3018 err = pci_register_driver(&cgx_driver); 3019 if (err < 0) 3020 return err; 3021 3022 err = pci_register_driver(&ptp_driver); 3023 if (err < 0) 3024 goto ptp_err; 3025 3026 err = pci_register_driver(&rvu_driver); 3027 if (err < 0) 3028 goto rvu_err; 3029 3030 return 0; 3031 rvu_err: 3032 pci_unregister_driver(&ptp_driver); 3033 ptp_err: 3034 pci_unregister_driver(&cgx_driver); 3035 3036 return err; 3037 } 3038 3039 static void __exit rvu_cleanup_module(void) 3040 { 3041 pci_unregister_driver(&rvu_driver); 3042 pci_unregister_driver(&ptp_driver); 3043 pci_unregister_driver(&cgx_driver); 3044 } 3045 3046 module_init(rvu_init_module); 3047 module_exit(rvu_cleanup_module); 3048
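
/* Illustrative sketch, not built as part of this driver: how the AF-VF
 * interrupt masks used above are split across the two 64-bit register banks.
 * Bank 0 (index 0 of e.g. RVU_PF_VFPF_MBOX_INT_ENA_W1SX()) covers VFs 0..63
 * and bank 1 covers VFs 64..127. Assuming INTR_MASK(n), defined in rvu.h,
 * evaluates to a mask of the n least significant bits and saturates to all
 * ones for n >= 64, the per-bank masks for 'vfs' enabled VFs would be:
 *
 *	u64 mask0 = INTR_MASK(vfs);				// VFs 0..63
 *	u64 mask1 = (vfs > 64) ? INTR_MASK(vfs - 64) : 0;	// VFs 64..127
 *
 * This mirrors the register writes done in rvu_enable_afvf_intr() and
 * rvu_disable_afvf_intr().
 */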