1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/interrupt.h> 13 #include <linux/delay.h> 14 #include <linux/irq.h> 15 #include <linux/pci.h> 16 #include <linux/sysfs.h> 17 18 #include "cgx.h" 19 #include "rvu.h" 20 #include "rvu_reg.h" 21 #include "ptp.h" 22 23 #include "rvu_trace.h" 24 25 #define DRV_NAME "rvu_af" 26 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" 27 28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); 29 30 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 31 struct rvu_block *block, int lf); 32 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 33 struct rvu_block *block, int lf); 34 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); 35 36 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 37 int type, int num, 38 void (mbox_handler)(struct work_struct *), 39 void (mbox_up_handler)(struct work_struct *)); 40 enum { 41 TYPE_AFVF, 42 TYPE_AFPF, 43 }; 44 45 /* Supported devices */ 46 static const struct pci_device_id rvu_id_table[] = { 47 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, 48 { 0, } /* end of table */ 49 }; 50 51 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); 52 MODULE_DESCRIPTION(DRV_STRING); 53 MODULE_LICENSE("GPL v2"); 54 MODULE_DEVICE_TABLE(pci, rvu_id_table); 55 56 static char *mkex_profile; /* MKEX profile name */ 57 module_param(mkex_profile, charp, 0000); 58 MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); 59 60 static char *kpu_profile; /* KPU profile name */ 61 module_param(kpu_profile, charp, 0000); 62 MODULE_PARM_DESC(kpu_profile, "KPU profile name string"); 63 64 static void rvu_setup_hw_capabilities(struct rvu *rvu) 65 { 66 struct rvu_hwinfo *hw = rvu->hw; 67 68 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; 69 hw->cap.nix_fixed_txschq_mapping = false; 70 hw->cap.nix_shaping = true; 71 hw->cap.nix_tx_link_bp = true; 72 hw->cap.nix_rx_multicast = true; 73 hw->rvu = rvu; 74 75 if (is_rvu_96xx_B0(rvu)) { 76 hw->cap.nix_fixed_txschq_mapping = true; 77 hw->cap.nix_txsch_per_cgx_lmac = 4; 78 hw->cap.nix_txsch_per_lbk_lmac = 132; 79 hw->cap.nix_txsch_per_sdp_lmac = 76; 80 hw->cap.nix_shaping = false; 81 hw->cap.nix_tx_link_bp = false; 82 if (is_rvu_96xx_A0(rvu)) 83 hw->cap.nix_rx_multicast = false; 84 } 85 86 if (!is_rvu_otx2(rvu)) 87 hw->cap.per_pf_mbox_regs = true; 88 } 89 90 /* Poll a RVU block's register 'offset', for a 'zero' 91 * or 'nonzero' at bits specified by 'mask' 92 */ 93 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) 94 { 95 unsigned long timeout = jiffies + usecs_to_jiffies(10000); 96 void __iomem *reg; 97 u64 reg_val; 98 99 reg = rvu->afreg_base + ((block << 28) | offset); 100 again: 101 reg_val = readq(reg); 102 if (zero && !(reg_val & mask)) 103 return 0; 104 if (!zero && (reg_val & mask)) 105 return 0; 106 if (time_before(jiffies, timeout)) { 107 usleep_range(1, 5); 108 goto again; 109 } 110 return -EBUSY; 111 } 112 113 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) 114 { 115 int id; 116 117 if (!rsrc->bmap) 118 return -EINVAL; 119 120 id = find_first_zero_bit(rsrc->bmap, rsrc->max); 121 if (id >= rsrc->max) 122 return -ENOSPC; 123 124 
__set_bit(id, rsrc->bmap); 125 126 return id; 127 } 128 129 int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) 130 { 131 int start; 132 133 if (!rsrc->bmap) 134 return -EINVAL; 135 136 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 137 if (start >= rsrc->max) 138 return -ENOSPC; 139 140 bitmap_set(rsrc->bmap, start, nrsrc); 141 return start; 142 } 143 144 static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) 145 { 146 if (!rsrc->bmap) 147 return; 148 if (start >= rsrc->max) 149 return; 150 151 bitmap_clear(rsrc->bmap, start, nrsrc); 152 } 153 154 bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) 155 { 156 int start; 157 158 if (!rsrc->bmap) 159 return false; 160 161 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); 162 if (start >= rsrc->max) 163 return false; 164 165 return true; 166 } 167 168 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) 169 { 170 if (!rsrc->bmap) 171 return; 172 173 __clear_bit(id, rsrc->bmap); 174 } 175 176 int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) 177 { 178 int used; 179 180 if (!rsrc->bmap) 181 return 0; 182 183 used = bitmap_weight(rsrc->bmap, rsrc->max); 184 return (rsrc->max - used); 185 } 186 187 bool is_rsrc_free(struct rsrc_bmap *rsrc, int id) 188 { 189 if (!rsrc->bmap) 190 return false; 191 192 return !test_bit(id, rsrc->bmap); 193 } 194 195 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) 196 { 197 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), 198 sizeof(long), GFP_KERNEL); 199 if (!rsrc->bmap) 200 return -ENOMEM; 201 return 0; 202 } 203 204 /* Get block LF's HW index from a PF_FUNC's block slot number */ 205 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) 206 { 207 u16 match = 0; 208 int lf; 209 210 mutex_lock(&rvu->rsrc_lock); 211 for (lf = 0; lf < block->lf.max; lf++) { 212 if (block->fn_map[lf] == pcifunc) { 213 if (slot == match) { 214 mutex_unlock(&rvu->rsrc_lock); 215 return lf; 216 } 217 match++; 218 } 219 } 220 mutex_unlock(&rvu->rsrc_lock); 221 return -ENODEV; 222 } 223 224 /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. 225 * Some silicon variants of OcteonTX2 supports 226 * multiple blocks of same type. 227 * 228 * @pcifunc has to be zero when no LF is yet attached. 229 * 230 * For a pcifunc if LFs are attached from multiple blocks of same type, then 231 * return blkaddr of first encountered block. 232 */ 233 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) 234 { 235 int devnum, blkaddr = -ENODEV; 236 u64 cfg, reg; 237 bool is_pf; 238 239 switch (blktype) { 240 case BLKTYPE_NPC: 241 blkaddr = BLKADDR_NPC; 242 goto exit; 243 case BLKTYPE_NPA: 244 blkaddr = BLKADDR_NPA; 245 goto exit; 246 case BLKTYPE_NIX: 247 /* For now assume NIX0 */ 248 if (!pcifunc) { 249 blkaddr = BLKADDR_NIX0; 250 goto exit; 251 } 252 break; 253 case BLKTYPE_SSO: 254 blkaddr = BLKADDR_SSO; 255 goto exit; 256 case BLKTYPE_SSOW: 257 blkaddr = BLKADDR_SSOW; 258 goto exit; 259 case BLKTYPE_TIM: 260 blkaddr = BLKADDR_TIM; 261 goto exit; 262 case BLKTYPE_CPT: 263 /* For now assume CPT0 */ 264 if (!pcifunc) { 265 blkaddr = BLKADDR_CPT0; 266 goto exit; 267 } 268 break; 269 } 270 271 /* Check if this is a RVU PF or VF */ 272 if (pcifunc & RVU_PFVF_FUNC_MASK) { 273 is_pf = false; 274 devnum = rvu_get_hwvf(rvu, pcifunc); 275 } else { 276 is_pf = true; 277 devnum = rvu_get_pf(pcifunc); 278 } 279 280 /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or 281 * 'BLKADDR_NIX1'. 282 */ 283 if (blktype == BLKTYPE_NIX) { 284 reg = is_pf ? 
RVU_PRIV_PFX_NIXX_CFG(0) : 285 RVU_PRIV_HWVFX_NIXX_CFG(0); 286 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 287 if (cfg) { 288 blkaddr = BLKADDR_NIX0; 289 goto exit; 290 } 291 292 reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) : 293 RVU_PRIV_HWVFX_NIXX_CFG(1); 294 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 295 if (cfg) 296 blkaddr = BLKADDR_NIX1; 297 } 298 299 if (blktype == BLKTYPE_CPT) { 300 reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) : 301 RVU_PRIV_HWVFX_CPTX_CFG(0); 302 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 303 if (cfg) { 304 blkaddr = BLKADDR_CPT0; 305 goto exit; 306 } 307 308 reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) : 309 RVU_PRIV_HWVFX_CPTX_CFG(1); 310 cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); 311 if (cfg) 312 blkaddr = BLKADDR_CPT1; 313 } 314 315 exit: 316 if (is_block_implemented(rvu->hw, blkaddr)) 317 return blkaddr; 318 return -ENODEV; 319 } 320 321 static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, 322 struct rvu_block *block, u16 pcifunc, 323 u16 lf, bool attach) 324 { 325 int devnum, num_lfs = 0; 326 bool is_pf; 327 u64 reg; 328 329 if (lf >= block->lf.max) { 330 dev_err(&rvu->pdev->dev, 331 "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", 332 __func__, lf, block->name, block->lf.max); 333 return; 334 } 335 336 /* Check if this is for a RVU PF or VF */ 337 if (pcifunc & RVU_PFVF_FUNC_MASK) { 338 is_pf = false; 339 devnum = rvu_get_hwvf(rvu, pcifunc); 340 } else { 341 is_pf = true; 342 devnum = rvu_get_pf(pcifunc); 343 } 344 345 block->fn_map[lf] = attach ? pcifunc : 0; 346 347 switch (block->addr) { 348 case BLKADDR_NPA: 349 pfvf->npalf = attach ? true : false; 350 num_lfs = pfvf->npalf; 351 break; 352 case BLKADDR_NIX0: 353 case BLKADDR_NIX1: 354 pfvf->nixlf = attach ? true : false; 355 num_lfs = pfvf->nixlf; 356 break; 357 case BLKADDR_SSO: 358 attach ? pfvf->sso++ : pfvf->sso--; 359 num_lfs = pfvf->sso; 360 break; 361 case BLKADDR_SSOW: 362 attach ? pfvf->ssow++ : pfvf->ssow--; 363 num_lfs = pfvf->ssow; 364 break; 365 case BLKADDR_TIM: 366 attach ? pfvf->timlfs++ : pfvf->timlfs--; 367 num_lfs = pfvf->timlfs; 368 break; 369 case BLKADDR_CPT0: 370 attach ? pfvf->cptlfs++ : pfvf->cptlfs--; 371 num_lfs = pfvf->cptlfs; 372 break; 373 case BLKADDR_CPT1: 374 attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--; 375 num_lfs = pfvf->cpt1_lfs; 376 break; 377 } 378 379 reg = is_pf ? 
block->pf_lfcnt_reg : block->vf_lfcnt_reg; 380 rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); 381 } 382 383 inline int rvu_get_pf(u16 pcifunc) 384 { 385 return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; 386 } 387 388 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) 389 { 390 u64 cfg; 391 392 /* Get numVFs attached to this PF and first HWVF */ 393 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 394 *numvfs = (cfg >> 12) & 0xFF; 395 *hwvf = cfg & 0xFFF; 396 } 397 398 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) 399 { 400 int pf, func; 401 u64 cfg; 402 403 pf = rvu_get_pf(pcifunc); 404 func = pcifunc & RVU_PFVF_FUNC_MASK; 405 406 /* Get first HWVF attached to this PF */ 407 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 408 409 return ((cfg & 0xFFF) + func - 1); 410 } 411 412 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) 413 { 414 /* Check if it is a PF or VF */ 415 if (pcifunc & RVU_PFVF_FUNC_MASK) 416 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; 417 else 418 return &rvu->pf[rvu_get_pf(pcifunc)]; 419 } 420 421 static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) 422 { 423 int pf, vf, nvfs; 424 u64 cfg; 425 426 pf = rvu_get_pf(pcifunc); 427 if (pf >= rvu->hw->total_pfs) 428 return false; 429 430 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 431 return true; 432 433 /* Check if VF is within number of VFs attached to this PF */ 434 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 435 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 436 nvfs = (cfg >> 12) & 0xFF; 437 if (vf >= nvfs) 438 return false; 439 440 return true; 441 } 442 443 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) 444 { 445 struct rvu_block *block; 446 447 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) 448 return false; 449 450 block = &hw->block[blkaddr]; 451 return block->implemented; 452 } 453 454 static void rvu_check_block_implemented(struct rvu *rvu) 455 { 456 struct rvu_hwinfo *hw = rvu->hw; 457 struct rvu_block *block; 458 int blkid; 459 u64 cfg; 460 461 /* For each block check if 'implemented' bit is set */ 462 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 463 block = &hw->block[blkid]; 464 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); 465 if (cfg & BIT_ULL(11)) 466 block->implemented = true; 467 } 468 } 469 470 static void rvu_setup_rvum_blk_revid(struct rvu *rvu) 471 { 472 rvu_write64(rvu, BLKADDR_RVUM, 473 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 474 RVU_BLK_RVUM_REVID); 475 } 476 477 static void rvu_clear_rvum_blk_revid(struct rvu *rvu) 478 { 479 rvu_write64(rvu, BLKADDR_RVUM, 480 RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00); 481 } 482 483 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) 484 { 485 int err; 486 487 if (!block->implemented) 488 return 0; 489 490 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); 491 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), 492 true); 493 return err; 494 } 495 496 static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) 497 { 498 struct rvu_block *block = &rvu->hw->block[blkaddr]; 499 500 if (!block->implemented) 501 return; 502 503 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); 504 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); 505 } 506 507 static void rvu_reset_all_blocks(struct rvu *rvu) 508 { 509 /* Do a HW reset of all RVU blocks */ 510 rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); 511 rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); 512 rvu_block_reset(rvu, 
BLKADDR_NIX1, NIX_AF_BLK_RST); 513 rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); 514 rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); 515 rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); 516 rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); 517 rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST); 518 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); 519 rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); 520 rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST); 521 rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST); 522 rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); 523 } 524 525 static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) 526 { 527 struct rvu_pfvf *pfvf; 528 u64 cfg; 529 int lf; 530 531 for (lf = 0; lf < block->lf.max; lf++) { 532 cfg = rvu_read64(rvu, block->addr, 533 block->lfcfg_reg | (lf << block->lfshift)); 534 if (!(cfg & BIT_ULL(63))) 535 continue; 536 537 /* Set this resource as being used */ 538 __set_bit(lf, block->lf.bmap); 539 540 /* Get, to whom this LF is attached */ 541 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); 542 rvu_update_rsrc_map(rvu, pfvf, block, 543 (cfg >> 8) & 0xFFFF, lf, true); 544 545 /* Set start MSIX vector for this LF within this PF/VF */ 546 rvu_set_msix_offset(rvu, pfvf, block, lf); 547 } 548 } 549 550 static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) 551 { 552 int min_vecs; 553 554 if (!vf) 555 goto check_pf; 556 557 if (!nvecs) { 558 dev_warn(rvu->dev, 559 "PF%d:VF%d is configured with zero msix vectors, %d\n", 560 pf, vf - 1, nvecs); 561 } 562 return; 563 564 check_pf: 565 if (pf == 0) 566 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; 567 else 568 min_vecs = RVU_PF_INT_VEC_CNT; 569 570 if (!(nvecs < min_vecs)) 571 return; 572 dev_warn(rvu->dev, 573 "PF%d is configured with too few vectors, %d, min is %d\n", 574 pf, nvecs, min_vecs); 575 } 576 577 static int rvu_setup_msix_resources(struct rvu *rvu) 578 { 579 struct rvu_hwinfo *hw = rvu->hw; 580 int pf, vf, numvfs, hwvf, err; 581 int nvecs, offset, max_msix; 582 struct rvu_pfvf *pfvf; 583 u64 cfg, phy_addr; 584 dma_addr_t iova; 585 586 for (pf = 0; pf < hw->total_pfs; pf++) { 587 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 588 /* If PF is not enabled, nothing to do */ 589 if (!((cfg >> 20) & 0x01)) 590 continue; 591 592 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 593 594 pfvf = &rvu->pf[pf]; 595 /* Get num of MSIX vectors attached to this PF */ 596 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); 597 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; 598 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); 599 600 /* Alloc msix bitmap for this PF */ 601 err = rvu_alloc_bitmap(&pfvf->msix); 602 if (err) 603 return err; 604 605 /* Allocate memory for MSIX vector to RVU block LF mapping */ 606 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, 607 sizeof(u16), GFP_KERNEL); 608 if (!pfvf->msix_lfmap) 609 return -ENOMEM; 610 611 /* For PF0 (AF) firmware will set msix vector offsets for 612 * AF, block AF and PF0_INT vectors, so jump to VFs. 613 */ 614 if (!pf) 615 goto setup_vfmsix; 616 617 /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. 618 * These are allocated on driver init and never freed, 619 * so no need to set 'msix_lfmap' for these. 
620 */ 621 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); 622 nvecs = (cfg >> 12) & 0xFF; 623 cfg &= ~0x7FFULL; 624 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 625 rvu_write64(rvu, BLKADDR_RVUM, 626 RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); 627 setup_vfmsix: 628 /* Alloc msix bitmap for VFs */ 629 for (vf = 0; vf < numvfs; vf++) { 630 pfvf = &rvu->hwvf[hwvf + vf]; 631 /* Get num of MSIX vectors attached to this VF */ 632 cfg = rvu_read64(rvu, BLKADDR_RVUM, 633 RVU_PRIV_PFX_MSIX_CFG(pf)); 634 pfvf->msix.max = (cfg & 0xFFF) + 1; 635 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); 636 637 /* Alloc msix bitmap for this VF */ 638 err = rvu_alloc_bitmap(&pfvf->msix); 639 if (err) 640 return err; 641 642 pfvf->msix_lfmap = 643 devm_kcalloc(rvu->dev, pfvf->msix.max, 644 sizeof(u16), GFP_KERNEL); 645 if (!pfvf->msix_lfmap) 646 return -ENOMEM; 647 648 /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. 649 * These are allocated on driver init and never freed, 650 * so no need to set 'msix_lfmap' for these. 651 */ 652 cfg = rvu_read64(rvu, BLKADDR_RVUM, 653 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); 654 nvecs = (cfg >> 12) & 0xFF; 655 cfg &= ~0x7FFULL; 656 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 657 rvu_write64(rvu, BLKADDR_RVUM, 658 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), 659 cfg | offset); 660 } 661 } 662 663 /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence 664 * create an IOMMU mapping for the physical address configured by 665 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. 666 */ 667 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 668 max_msix = cfg & 0xFFFFF; 669 if (rvu->fwdata && rvu->fwdata->msixtr_base) 670 phy_addr = rvu->fwdata->msixtr_base; 671 else 672 phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); 673 674 iova = dma_map_resource(rvu->dev, phy_addr, 675 max_msix * PCI_MSIX_ENTRY_SIZE, 676 DMA_BIDIRECTIONAL, 0); 677 678 if (dma_mapping_error(rvu->dev, iova)) 679 return -ENOMEM; 680 681 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); 682 rvu->msix_base_iova = iova; 683 rvu->msixtr_base_phy = phy_addr; 684 685 return 0; 686 } 687 688 static void rvu_reset_msix(struct rvu *rvu) 689 { 690 /* Restore msixtr base register */ 691 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, 692 rvu->msixtr_base_phy); 693 } 694 695 static void rvu_free_hw_resources(struct rvu *rvu) 696 { 697 struct rvu_hwinfo *hw = rvu->hw; 698 struct rvu_block *block; 699 struct rvu_pfvf *pfvf; 700 int id, max_msix; 701 u64 cfg; 702 703 rvu_npa_freemem(rvu); 704 rvu_npc_freemem(rvu); 705 rvu_nix_freemem(rvu); 706 707 /* Free block LF bitmaps */ 708 for (id = 0; id < BLK_COUNT; id++) { 709 block = &hw->block[id]; 710 kfree(block->lf.bmap); 711 } 712 713 /* Free MSIX bitmaps */ 714 for (id = 0; id < hw->total_pfs; id++) { 715 pfvf = &rvu->pf[id]; 716 kfree(pfvf->msix.bmap); 717 } 718 719 for (id = 0; id < hw->total_vfs; id++) { 720 pfvf = &rvu->hwvf[id]; 721 kfree(pfvf->msix.bmap); 722 } 723 724 /* Unmap MSIX vector base IOVA mapping */ 725 if (!rvu->msix_base_iova) 726 return; 727 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 728 max_msix = cfg & 0xFFFFF; 729 dma_unmap_resource(rvu->dev, rvu->msix_base_iova, 730 max_msix * PCI_MSIX_ENTRY_SIZE, 731 DMA_BIDIRECTIONAL, 0); 732 733 rvu_reset_msix(rvu); 734 mutex_destroy(&rvu->rsrc_lock); 735 } 736 737 static void rvu_setup_pfvf_macaddress(struct rvu *rvu) 738 { 739 struct rvu_hwinfo *hw = rvu->hw; 740 int pf, vf, numvfs, hwvf; 741 struct rvu_pfvf *pfvf; 742 u64 *mac; 743 744 for 
(pf = 0; pf < hw->total_pfs; pf++) { 745 /* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */ 746 if (!pf) 747 goto lbkvf; 748 749 if (!is_pf_cgxmapped(rvu, pf)) 750 continue; 751 /* Assign MAC address to PF */ 752 pfvf = &rvu->pf[pf]; 753 if (rvu->fwdata && pf < PF_MACNUM_MAX) { 754 mac = &rvu->fwdata->pf_macs[pf]; 755 if (*mac) 756 u64_to_ether_addr(*mac, pfvf->mac_addr); 757 else 758 eth_random_addr(pfvf->mac_addr); 759 } else { 760 eth_random_addr(pfvf->mac_addr); 761 } 762 ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); 763 764 lbkvf: 765 /* Assign MAC address to VFs*/ 766 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 767 for (vf = 0; vf < numvfs; vf++, hwvf++) { 768 pfvf = &rvu->hwvf[hwvf]; 769 if (rvu->fwdata && hwvf < VF_MACNUM_MAX) { 770 mac = &rvu->fwdata->vf_macs[hwvf]; 771 if (*mac) 772 u64_to_ether_addr(*mac, pfvf->mac_addr); 773 else 774 eth_random_addr(pfvf->mac_addr); 775 } else { 776 eth_random_addr(pfvf->mac_addr); 777 } 778 ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); 779 } 780 } 781 } 782 783 static int rvu_fwdata_init(struct rvu *rvu) 784 { 785 u64 fwdbase; 786 int err; 787 788 /* Get firmware data base address */ 789 err = cgx_get_fwdata_base(&fwdbase); 790 if (err) 791 goto fail; 792 rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata)); 793 if (!rvu->fwdata) 794 goto fail; 795 if (!is_rvu_fwdata_valid(rvu)) { 796 dev_err(rvu->dev, 797 "Mismatch in 'fwdata' struct btw kernel and firmware\n"); 798 iounmap(rvu->fwdata); 799 rvu->fwdata = NULL; 800 return -EINVAL; 801 } 802 return 0; 803 fail: 804 dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n"); 805 return -EIO; 806 } 807 808 static void rvu_fwdata_exit(struct rvu *rvu) 809 { 810 if (rvu->fwdata) 811 iounmap(rvu->fwdata); 812 } 813 814 static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr) 815 { 816 struct rvu_hwinfo *hw = rvu->hw; 817 struct rvu_block *block; 818 int blkid; 819 u64 cfg; 820 821 /* Init NIX LF's bitmap */ 822 block = &hw->block[blkaddr]; 823 if (!block->implemented) 824 return 0; 825 blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 826 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 827 block->lf.max = cfg & 0xFFF; 828 block->addr = blkaddr; 829 block->type = BLKTYPE_NIX; 830 block->lfshift = 8; 831 block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; 832 block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid); 833 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid); 834 block->lfcfg_reg = NIX_PRIV_LFX_CFG; 835 block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; 836 block->lfreset_reg = NIX_AF_LF_RST; 837 sprintf(block->name, "NIX%d", blkid); 838 rvu->nix_blkaddr[blkid] = blkaddr; 839 return rvu_alloc_bitmap(&block->lf); 840 } 841 842 static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr) 843 { 844 struct rvu_hwinfo *hw = rvu->hw; 845 struct rvu_block *block; 846 int blkid; 847 u64 cfg; 848 849 /* Init CPT LF's bitmap */ 850 block = &hw->block[blkaddr]; 851 if (!block->implemented) 852 return 0; 853 blkid = (blkaddr == BLKADDR_CPT0) ? 
0 : 1; 854 cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0); 855 block->lf.max = cfg & 0xFF; 856 block->addr = blkaddr; 857 block->type = BLKTYPE_CPT; 858 block->multislot = true; 859 block->lfshift = 3; 860 block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; 861 block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid); 862 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid); 863 block->lfcfg_reg = CPT_PRIV_LFX_CFG; 864 block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; 865 block->lfreset_reg = CPT_AF_LF_RST; 866 sprintf(block->name, "CPT%d", blkid); 867 return rvu_alloc_bitmap(&block->lf); 868 } 869 870 static void rvu_get_lbk_bufsize(struct rvu *rvu) 871 { 872 struct pci_dev *pdev = NULL; 873 void __iomem *base; 874 u64 lbk_const; 875 876 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 877 PCI_DEVID_OCTEONTX2_LBK, pdev); 878 if (!pdev) 879 return; 880 881 base = pci_ioremap_bar(pdev, 0); 882 if (!base) 883 goto err_put; 884 885 lbk_const = readq(base + LBK_CONST); 886 887 /* cache fifo size */ 888 rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const); 889 890 iounmap(base); 891 err_put: 892 pci_dev_put(pdev); 893 } 894 895 static int rvu_setup_hw_resources(struct rvu *rvu) 896 { 897 struct rvu_hwinfo *hw = rvu->hw; 898 struct rvu_block *block; 899 int blkid, err; 900 u64 cfg; 901 902 /* Get HW supported max RVU PF & VF count */ 903 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); 904 hw->total_pfs = (cfg >> 32) & 0xFF; 905 hw->total_vfs = (cfg >> 20) & 0xFFF; 906 hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; 907 908 /* Init NPA LF's bitmap */ 909 block = &hw->block[BLKADDR_NPA]; 910 if (!block->implemented) 911 goto nix; 912 cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); 913 block->lf.max = (cfg >> 16) & 0xFFF; 914 block->addr = BLKADDR_NPA; 915 block->type = BLKTYPE_NPA; 916 block->lfshift = 8; 917 block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; 918 block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; 919 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; 920 block->lfcfg_reg = NPA_PRIV_LFX_CFG; 921 block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; 922 block->lfreset_reg = NPA_AF_LF_RST; 923 sprintf(block->name, "NPA"); 924 err = rvu_alloc_bitmap(&block->lf); 925 if (err) 926 return err; 927 928 nix: 929 err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0); 930 if (err) 931 return err; 932 err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1); 933 if (err) 934 return err; 935 936 /* Init SSO group's bitmap */ 937 block = &hw->block[BLKADDR_SSO]; 938 if (!block->implemented) 939 goto ssow; 940 cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); 941 block->lf.max = cfg & 0xFFFF; 942 block->addr = BLKADDR_SSO; 943 block->type = BLKTYPE_SSO; 944 block->multislot = true; 945 block->lfshift = 3; 946 block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; 947 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; 948 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; 949 block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; 950 block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; 951 block->lfreset_reg = SSO_AF_LF_HWGRP_RST; 952 sprintf(block->name, "SSO GROUP"); 953 err = rvu_alloc_bitmap(&block->lf); 954 if (err) 955 return err; 956 957 ssow: 958 /* Init SSO workslot's bitmap */ 959 block = &hw->block[BLKADDR_SSOW]; 960 if (!block->implemented) 961 goto tim; 962 block->lf.max = (cfg >> 56) & 0xFF; 963 block->addr = BLKADDR_SSOW; 964 block->type = BLKTYPE_SSOW; 965 block->multislot = true; 966 block->lfshift = 3; 967 block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; 968 block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; 969 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; 970 
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; 971 block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; 972 block->lfreset_reg = SSOW_AF_LF_HWS_RST; 973 sprintf(block->name, "SSOWS"); 974 err = rvu_alloc_bitmap(&block->lf); 975 if (err) 976 return err; 977 978 tim: 979 /* Init TIM LF's bitmap */ 980 block = &hw->block[BLKADDR_TIM]; 981 if (!block->implemented) 982 goto cpt; 983 cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); 984 block->lf.max = cfg & 0xFFFF; 985 block->addr = BLKADDR_TIM; 986 block->type = BLKTYPE_TIM; 987 block->multislot = true; 988 block->lfshift = 3; 989 block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; 990 block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; 991 block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; 992 block->lfcfg_reg = TIM_PRIV_LFX_CFG; 993 block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; 994 block->lfreset_reg = TIM_AF_LF_RST; 995 sprintf(block->name, "TIM"); 996 err = rvu_alloc_bitmap(&block->lf); 997 if (err) 998 return err; 999 1000 cpt: 1001 err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0); 1002 if (err) 1003 return err; 1004 err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1); 1005 if (err) 1006 return err; 1007 1008 /* Allocate memory for PFVF data */ 1009 rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, 1010 sizeof(struct rvu_pfvf), GFP_KERNEL); 1011 if (!rvu->pf) 1012 return -ENOMEM; 1013 1014 rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, 1015 sizeof(struct rvu_pfvf), GFP_KERNEL); 1016 if (!rvu->hwvf) 1017 return -ENOMEM; 1018 1019 mutex_init(&rvu->rsrc_lock); 1020 1021 rvu_fwdata_init(rvu); 1022 1023 err = rvu_setup_msix_resources(rvu); 1024 if (err) 1025 return err; 1026 1027 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 1028 block = &hw->block[blkid]; 1029 if (!block->lf.bmap) 1030 continue; 1031 1032 /* Allocate memory for block LF/slot to pcifunc mapping info */ 1033 block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, 1034 sizeof(u16), GFP_KERNEL); 1035 if (!block->fn_map) { 1036 err = -ENOMEM; 1037 goto msix_err; 1038 } 1039 1040 /* Scan all blocks to check if low level firmware has 1041 * already provisioned any of the resources to a PF/VF. 
1042 */ 1043 rvu_scan_block(rvu, block); 1044 } 1045 1046 err = rvu_set_channels_base(rvu); 1047 if (err) 1048 goto msix_err; 1049 1050 err = rvu_npc_init(rvu); 1051 if (err) 1052 goto npc_err; 1053 1054 err = rvu_cgx_init(rvu); 1055 if (err) 1056 goto cgx_err; 1057 1058 /* Assign MACs for CGX mapped functions */ 1059 rvu_setup_pfvf_macaddress(rvu); 1060 1061 err = rvu_npa_init(rvu); 1062 if (err) 1063 goto npa_err; 1064 1065 rvu_get_lbk_bufsize(rvu); 1066 1067 err = rvu_nix_init(rvu); 1068 if (err) 1069 goto nix_err; 1070 1071 rvu_program_channels(rvu); 1072 1073 return 0; 1074 1075 nix_err: 1076 rvu_nix_freemem(rvu); 1077 npa_err: 1078 rvu_npa_freemem(rvu); 1079 cgx_err: 1080 rvu_cgx_exit(rvu); 1081 npc_err: 1082 rvu_npc_freemem(rvu); 1083 rvu_fwdata_exit(rvu); 1084 msix_err: 1085 rvu_reset_msix(rvu); 1086 return err; 1087 } 1088 1089 /* NPA and NIX admin queue APIs */ 1090 void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) 1091 { 1092 if (!aq) 1093 return; 1094 1095 qmem_free(rvu->dev, aq->inst); 1096 qmem_free(rvu->dev, aq->res); 1097 devm_kfree(rvu->dev, aq); 1098 } 1099 1100 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, 1101 int qsize, int inst_size, int res_size) 1102 { 1103 struct admin_queue *aq; 1104 int err; 1105 1106 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); 1107 if (!*ad_queue) 1108 return -ENOMEM; 1109 aq = *ad_queue; 1110 1111 /* Alloc memory for instructions i.e AQ */ 1112 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); 1113 if (err) { 1114 devm_kfree(rvu->dev, aq); 1115 return err; 1116 } 1117 1118 /* Alloc memory for results */ 1119 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); 1120 if (err) { 1121 rvu_aq_free(rvu, aq); 1122 return err; 1123 } 1124 1125 spin_lock_init(&aq->lock); 1126 return 0; 1127 } 1128 1129 int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, 1130 struct ready_msg_rsp *rsp) 1131 { 1132 if (rvu->fwdata) { 1133 rsp->rclk_freq = rvu->fwdata->rclk; 1134 rsp->sclk_freq = rvu->fwdata->sclk; 1135 } 1136 return 0; 1137 } 1138 1139 /* Get current count of a RVU block's LF/slots 1140 * provisioned to a given RVU func. 1141 */ 1142 u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr) 1143 { 1144 switch (blkaddr) { 1145 case BLKADDR_NPA: 1146 return pfvf->npalf ? 1 : 0; 1147 case BLKADDR_NIX0: 1148 case BLKADDR_NIX1: 1149 return pfvf->nixlf ? 1 : 0; 1150 case BLKADDR_SSO: 1151 return pfvf->sso; 1152 case BLKADDR_SSOW: 1153 return pfvf->ssow; 1154 case BLKADDR_TIM: 1155 return pfvf->timlfs; 1156 case BLKADDR_CPT0: 1157 return pfvf->cptlfs; 1158 case BLKADDR_CPT1: 1159 return pfvf->cpt1_lfs; 1160 } 1161 return 0; 1162 } 1163 1164 /* Return true if LFs of block type are attached to pcifunc */ 1165 static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype) 1166 { 1167 switch (blktype) { 1168 case BLKTYPE_NPA: 1169 return pfvf->npalf ? 1 : 0; 1170 case BLKTYPE_NIX: 1171 return pfvf->nixlf ? 
1 : 0; 1172 case BLKTYPE_SSO: 1173 return !!pfvf->sso; 1174 case BLKTYPE_SSOW: 1175 return !!pfvf->ssow; 1176 case BLKTYPE_TIM: 1177 return !!pfvf->timlfs; 1178 case BLKTYPE_CPT: 1179 return pfvf->cptlfs || pfvf->cpt1_lfs; 1180 } 1181 1182 return false; 1183 } 1184 1185 bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) 1186 { 1187 struct rvu_pfvf *pfvf; 1188 1189 if (!is_pf_func_valid(rvu, pcifunc)) 1190 return false; 1191 1192 pfvf = rvu_get_pfvf(rvu, pcifunc); 1193 1194 /* Check if this PFFUNC has a LF of type blktype attached */ 1195 if (!is_blktype_attached(pfvf, blktype)) 1196 return false; 1197 1198 return true; 1199 } 1200 1201 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, 1202 int pcifunc, int slot) 1203 { 1204 u64 val; 1205 1206 val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); 1207 rvu_write64(rvu, block->addr, block->lookup_reg, val); 1208 /* Wait for the lookup to finish */ 1209 /* TODO: put some timeout here */ 1210 while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) 1211 ; 1212 1213 val = rvu_read64(rvu, block->addr, block->lookup_reg); 1214 1215 /* Check LF valid bit */ 1216 if (!(val & (1ULL << 12))) 1217 return -1; 1218 1219 return (val & 0xFFF); 1220 } 1221 1222 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) 1223 { 1224 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1225 struct rvu_hwinfo *hw = rvu->hw; 1226 struct rvu_block *block; 1227 int slot, lf, num_lfs; 1228 int blkaddr; 1229 1230 blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); 1231 if (blkaddr < 0) 1232 return; 1233 1234 if (blktype == BLKTYPE_NIX) 1235 rvu_nix_reset_mac(pfvf, pcifunc); 1236 1237 block = &hw->block[blkaddr]; 1238 1239 num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); 1240 if (!num_lfs) 1241 return; 1242 1243 for (slot = 0; slot < num_lfs; slot++) { 1244 lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); 1245 if (lf < 0) /* This should never happen */ 1246 continue; 1247 1248 /* Disable the LF */ 1249 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 1250 (lf << block->lfshift), 0x00ULL); 1251 1252 /* Update SW maintained mapping info as well */ 1253 rvu_update_rsrc_map(rvu, pfvf, block, 1254 pcifunc, lf, false); 1255 1256 /* Free the resource */ 1257 rvu_free_rsrc(&block->lf, lf); 1258 1259 /* Clear MSIX vector offset for this LF */ 1260 rvu_clear_msix_offset(rvu, pfvf, block, lf); 1261 } 1262 } 1263 1264 static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, 1265 u16 pcifunc) 1266 { 1267 struct rvu_hwinfo *hw = rvu->hw; 1268 bool detach_all = true; 1269 struct rvu_block *block; 1270 int blkid; 1271 1272 mutex_lock(&rvu->rsrc_lock); 1273 1274 /* Check for partial resource detach */ 1275 if (detach && detach->partial) 1276 detach_all = false; 1277 1278 /* Check for RVU block's LFs attached to this func, 1279 * if so, detach them. 
1280 */ 1281 for (blkid = 0; blkid < BLK_COUNT; blkid++) { 1282 block = &hw->block[blkid]; 1283 if (!block->lf.bmap) 1284 continue; 1285 if (!detach_all && detach) { 1286 if (blkid == BLKADDR_NPA && !detach->npalf) 1287 continue; 1288 else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) 1289 continue; 1290 else if ((blkid == BLKADDR_NIX1) && !detach->nixlf) 1291 continue; 1292 else if ((blkid == BLKADDR_SSO) && !detach->sso) 1293 continue; 1294 else if ((blkid == BLKADDR_SSOW) && !detach->ssow) 1295 continue; 1296 else if ((blkid == BLKADDR_TIM) && !detach->timlfs) 1297 continue; 1298 else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) 1299 continue; 1300 else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs) 1301 continue; 1302 } 1303 rvu_detach_block(rvu, pcifunc, block->type); 1304 } 1305 1306 mutex_unlock(&rvu->rsrc_lock); 1307 return 0; 1308 } 1309 1310 int rvu_mbox_handler_detach_resources(struct rvu *rvu, 1311 struct rsrc_detach *detach, 1312 struct msg_rsp *rsp) 1313 { 1314 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); 1315 } 1316 1317 int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) 1318 { 1319 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1320 int blkaddr = BLKADDR_NIX0, vf; 1321 struct rvu_pfvf *pf; 1322 1323 /* All CGX mapped PFs are set with assigned NIX block during init */ 1324 if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { 1325 pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 1326 blkaddr = pf->nix_blkaddr; 1327 } else if (is_afvf(pcifunc)) { 1328 vf = pcifunc - 1; 1329 /* Assign NIX based on VF number. All even numbered VFs get 1330 * NIX0 and odd numbered gets NIX1 1331 */ 1332 blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0; 1333 /* NIX1 is not present on all silicons */ 1334 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) 1335 blkaddr = BLKADDR_NIX0; 1336 } 1337 1338 switch (blkaddr) { 1339 case BLKADDR_NIX1: 1340 pfvf->nix_blkaddr = BLKADDR_NIX1; 1341 pfvf->nix_rx_intf = NIX_INTFX_RX(1); 1342 pfvf->nix_tx_intf = NIX_INTFX_TX(1); 1343 break; 1344 case BLKADDR_NIX0: 1345 default: 1346 pfvf->nix_blkaddr = BLKADDR_NIX0; 1347 pfvf->nix_rx_intf = NIX_INTFX_RX(0); 1348 pfvf->nix_tx_intf = NIX_INTFX_TX(0); 1349 break; 1350 } 1351 1352 return pfvf->nix_blkaddr; 1353 } 1354 1355 static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, 1356 u16 pcifunc, struct rsrc_attach *attach) 1357 { 1358 int blkaddr; 1359 1360 switch (blktype) { 1361 case BLKTYPE_NIX: 1362 blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc); 1363 break; 1364 case BLKTYPE_CPT: 1365 if (attach->hdr.ver < RVU_MULTI_BLK_VER) 1366 return rvu_get_blkaddr(rvu, blktype, 0); 1367 blkaddr = attach->cpt_blkaddr ? 
attach->cpt_blkaddr : 1368 BLKADDR_CPT0; 1369 if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) 1370 return -ENODEV; 1371 break; 1372 default: 1373 return rvu_get_blkaddr(rvu, blktype, 0); 1374 } 1375 1376 if (is_block_implemented(rvu->hw, blkaddr)) 1377 return blkaddr; 1378 1379 return -ENODEV; 1380 } 1381 1382 static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype, 1383 int num_lfs, struct rsrc_attach *attach) 1384 { 1385 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1386 struct rvu_hwinfo *hw = rvu->hw; 1387 struct rvu_block *block; 1388 int slot, lf; 1389 int blkaddr; 1390 u64 cfg; 1391 1392 if (!num_lfs) 1393 return; 1394 1395 blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach); 1396 if (blkaddr < 0) 1397 return; 1398 1399 block = &hw->block[blkaddr]; 1400 if (!block->lf.bmap) 1401 return; 1402 1403 for (slot = 0; slot < num_lfs; slot++) { 1404 /* Allocate the resource */ 1405 lf = rvu_alloc_rsrc(&block->lf); 1406 if (lf < 0) 1407 return; 1408 1409 cfg = (1ULL << 63) | (pcifunc << 8) | slot; 1410 rvu_write64(rvu, blkaddr, block->lfcfg_reg | 1411 (lf << block->lfshift), cfg); 1412 rvu_update_rsrc_map(rvu, pfvf, block, 1413 pcifunc, lf, true); 1414 1415 /* Set start MSIX vector for this LF within this PF/VF */ 1416 rvu_set_msix_offset(rvu, pfvf, block, lf); 1417 } 1418 } 1419 1420 static int rvu_check_rsrc_availability(struct rvu *rvu, 1421 struct rsrc_attach *req, u16 pcifunc) 1422 { 1423 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1424 int free_lfs, mappedlfs, blkaddr; 1425 struct rvu_hwinfo *hw = rvu->hw; 1426 struct rvu_block *block; 1427 1428 /* Only one NPA LF can be attached */ 1429 if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) { 1430 block = &hw->block[BLKADDR_NPA]; 1431 free_lfs = rvu_rsrc_free_count(&block->lf); 1432 if (!free_lfs) 1433 goto fail; 1434 } else if (req->npalf) { 1435 dev_err(&rvu->pdev->dev, 1436 "Func 0x%x: Invalid req, already has NPA\n", 1437 pcifunc); 1438 return -EINVAL; 1439 } 1440 1441 /* Only one NIX LF can be attached */ 1442 if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) { 1443 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, 1444 pcifunc, req); 1445 if (blkaddr < 0) 1446 return blkaddr; 1447 block = &hw->block[blkaddr]; 1448 free_lfs = rvu_rsrc_free_count(&block->lf); 1449 if (!free_lfs) 1450 goto fail; 1451 } else if (req->nixlf) { 1452 dev_err(&rvu->pdev->dev, 1453 "Func 0x%x: Invalid req, already has NIX\n", 1454 pcifunc); 1455 return -EINVAL; 1456 } 1457 1458 if (req->sso) { 1459 block = &hw->block[BLKADDR_SSO]; 1460 /* Is request within limits ? 
*/ 1461 if (req->sso > block->lf.max) { 1462 dev_err(&rvu->pdev->dev, 1463 "Func 0x%x: Invalid SSO req, %d > max %d\n", 1464 pcifunc, req->sso, block->lf.max); 1465 return -EINVAL; 1466 } 1467 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); 1468 free_lfs = rvu_rsrc_free_count(&block->lf); 1469 /* Check if additional resources are available */ 1470 if (req->sso > mappedlfs && 1471 ((req->sso - mappedlfs) > free_lfs)) 1472 goto fail; 1473 } 1474 1475 if (req->ssow) { 1476 block = &hw->block[BLKADDR_SSOW]; 1477 if (req->ssow > block->lf.max) { 1478 dev_err(&rvu->pdev->dev, 1479 "Func 0x%x: Invalid SSOW req, %d > max %d\n", 1480 pcifunc, req->sso, block->lf.max); 1481 return -EINVAL; 1482 } 1483 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); 1484 free_lfs = rvu_rsrc_free_count(&block->lf); 1485 if (req->ssow > mappedlfs && 1486 ((req->ssow - mappedlfs) > free_lfs)) 1487 goto fail; 1488 } 1489 1490 if (req->timlfs) { 1491 block = &hw->block[BLKADDR_TIM]; 1492 if (req->timlfs > block->lf.max) { 1493 dev_err(&rvu->pdev->dev, 1494 "Func 0x%x: Invalid TIMLF req, %d > max %d\n", 1495 pcifunc, req->timlfs, block->lf.max); 1496 return -EINVAL; 1497 } 1498 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); 1499 free_lfs = rvu_rsrc_free_count(&block->lf); 1500 if (req->timlfs > mappedlfs && 1501 ((req->timlfs - mappedlfs) > free_lfs)) 1502 goto fail; 1503 } 1504 1505 if (req->cptlfs) { 1506 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT, 1507 pcifunc, req); 1508 if (blkaddr < 0) 1509 return blkaddr; 1510 block = &hw->block[blkaddr]; 1511 if (req->cptlfs > block->lf.max) { 1512 dev_err(&rvu->pdev->dev, 1513 "Func 0x%x: Invalid CPTLF req, %d > max %d\n", 1514 pcifunc, req->cptlfs, block->lf.max); 1515 return -EINVAL; 1516 } 1517 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); 1518 free_lfs = rvu_rsrc_free_count(&block->lf); 1519 if (req->cptlfs > mappedlfs && 1520 ((req->cptlfs - mappedlfs) > free_lfs)) 1521 goto fail; 1522 } 1523 1524 return 0; 1525 1526 fail: 1527 dev_info(rvu->dev, "Request for %s failed\n", block->name); 1528 return -ENOSPC; 1529 } 1530 1531 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype, 1532 struct rsrc_attach *attach) 1533 { 1534 int blkaddr, num_lfs; 1535 1536 blkaddr = rvu_get_attach_blkaddr(rvu, blktype, 1537 attach->hdr.pcifunc, attach); 1538 if (blkaddr < 0) 1539 return false; 1540 1541 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc), 1542 blkaddr); 1543 /* Requester already has LFs from given block ? */ 1544 return !!num_lfs; 1545 } 1546 1547 int rvu_mbox_handler_attach_resources(struct rvu *rvu, 1548 struct rsrc_attach *attach, 1549 struct msg_rsp *rsp) 1550 { 1551 u16 pcifunc = attach->hdr.pcifunc; 1552 int err; 1553 1554 /* If first request, detach all existing attached resources */ 1555 if (!attach->modify) 1556 rvu_detach_rsrcs(rvu, NULL, pcifunc); 1557 1558 mutex_lock(&rvu->rsrc_lock); 1559 1560 /* Check if the request can be accommodated */ 1561 err = rvu_check_rsrc_availability(rvu, attach, pcifunc); 1562 if (err) 1563 goto exit; 1564 1565 /* Now attach the requested resources */ 1566 if (attach->npalf) 1567 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach); 1568 1569 if (attach->nixlf) 1570 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach); 1571 1572 if (attach->sso) { 1573 /* RVU func doesn't know which exact LF or slot is attached 1574 * to it, it always sees as slot 0,1,2. So for a 'modify' 1575 * request, simply detach all existing attached LFs/slots 1576 * and attach a fresh. 
1577 */ 1578 if (attach->modify) 1579 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); 1580 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, 1581 attach->sso, attach); 1582 } 1583 1584 if (attach->ssow) { 1585 if (attach->modify) 1586 rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); 1587 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, 1588 attach->ssow, attach); 1589 } 1590 1591 if (attach->timlfs) { 1592 if (attach->modify) 1593 rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); 1594 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, 1595 attach->timlfs, attach); 1596 } 1597 1598 if (attach->cptlfs) { 1599 if (attach->modify && 1600 rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach)) 1601 rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); 1602 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, 1603 attach->cptlfs, attach); 1604 } 1605 1606 exit: 1607 mutex_unlock(&rvu->rsrc_lock); 1608 return err; 1609 } 1610 1611 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1612 int blkaddr, int lf) 1613 { 1614 u16 vec; 1615 1616 if (lf < 0) 1617 return MSIX_VECTOR_INVALID; 1618 1619 for (vec = 0; vec < pfvf->msix.max; vec++) { 1620 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) 1621 return vec; 1622 } 1623 return MSIX_VECTOR_INVALID; 1624 } 1625 1626 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1627 struct rvu_block *block, int lf) 1628 { 1629 u16 nvecs, vec, offset; 1630 u64 cfg; 1631 1632 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1633 (lf << block->lfshift)); 1634 nvecs = (cfg >> 12) & 0xFF; 1635 1636 /* Check and alloc MSIX vectors, must be contiguous */ 1637 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) 1638 return; 1639 1640 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); 1641 1642 /* Config MSIX offset in LF */ 1643 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1644 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); 1645 1646 /* Update the bitmap as well */ 1647 for (vec = 0; vec < nvecs; vec++) 1648 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); 1649 } 1650 1651 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, 1652 struct rvu_block *block, int lf) 1653 { 1654 u16 nvecs, vec, offset; 1655 u64 cfg; 1656 1657 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | 1658 (lf << block->lfshift)); 1659 nvecs = (cfg >> 12) & 0xFF; 1660 1661 /* Clear MSIX offset in LF */ 1662 rvu_write64(rvu, block->addr, block->msixcfg_reg | 1663 (lf << block->lfshift), cfg & ~0x7FFULL); 1664 1665 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); 1666 1667 /* Update the mapping */ 1668 for (vec = 0; vec < nvecs; vec++) 1669 pfvf->msix_lfmap[offset + vec] = 0; 1670 1671 /* Free the same in MSIX bitmap */ 1672 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); 1673 } 1674 1675 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, 1676 struct msix_offset_rsp *rsp) 1677 { 1678 struct rvu_hwinfo *hw = rvu->hw; 1679 u16 pcifunc = req->hdr.pcifunc; 1680 struct rvu_pfvf *pfvf; 1681 int lf, slot, blkaddr; 1682 1683 pfvf = rvu_get_pfvf(rvu, pcifunc); 1684 if (!pfvf->msix.bmap) 1685 return 0; 1686 1687 /* Set MSIX offsets for each block's LFs attached to this PF/VF */ 1688 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); 1689 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); 1690 1691 /* Get BLKADDR from which LFs are attached to pcifunc */ 1692 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1693 if (blkaddr < 0) { 1694 rsp->nix_msixoff = MSIX_VECTOR_INVALID; 1695 } else { 1696 lf = 
rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1697 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf); 1698 } 1699 1700 rsp->sso = pfvf->sso; 1701 for (slot = 0; slot < rsp->sso; slot++) { 1702 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); 1703 rsp->sso_msixoff[slot] = 1704 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); 1705 } 1706 1707 rsp->ssow = pfvf->ssow; 1708 for (slot = 0; slot < rsp->ssow; slot++) { 1709 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); 1710 rsp->ssow_msixoff[slot] = 1711 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); 1712 } 1713 1714 rsp->timlfs = pfvf->timlfs; 1715 for (slot = 0; slot < rsp->timlfs; slot++) { 1716 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot); 1717 rsp->timlf_msixoff[slot] = 1718 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); 1719 } 1720 1721 rsp->cptlfs = pfvf->cptlfs; 1722 for (slot = 0; slot < rsp->cptlfs; slot++) { 1723 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); 1724 rsp->cptlf_msixoff[slot] = 1725 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); 1726 } 1727 1728 rsp->cpt1_lfs = pfvf->cpt1_lfs; 1729 for (slot = 0; slot < rsp->cpt1_lfs; slot++) { 1730 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot); 1731 rsp->cpt1_lf_msixoff[slot] = 1732 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf); 1733 } 1734 1735 return 0; 1736 } 1737 1738 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, 1739 struct msg_rsp *rsp) 1740 { 1741 u16 pcifunc = req->hdr.pcifunc; 1742 u16 vf, numvfs; 1743 u64 cfg; 1744 1745 vf = pcifunc & RVU_PFVF_FUNC_MASK; 1746 cfg = rvu_read64(rvu, BLKADDR_RVUM, 1747 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); 1748 numvfs = (cfg >> 12) & 0xFF; 1749 1750 if (vf && vf <= numvfs) 1751 __rvu_flr_handler(rvu, pcifunc); 1752 else 1753 return RVU_INVALID_VF_ID; 1754 1755 return 0; 1756 } 1757 1758 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, 1759 struct get_hw_cap_rsp *rsp) 1760 { 1761 struct rvu_hwinfo *hw = rvu->hw; 1762 1763 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; 1764 rsp->nix_shaping = hw->cap.nix_shaping; 1765 1766 return 0; 1767 } 1768 1769 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, 1770 struct msg_rsp *rsp) 1771 { 1772 struct rvu_hwinfo *hw = rvu->hw; 1773 u16 pcifunc = req->hdr.pcifunc; 1774 struct rvu_pfvf *pfvf; 1775 int blkaddr, nixlf; 1776 u16 target; 1777 1778 /* Only PF can add VF permissions */ 1779 if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc)) 1780 return -EOPNOTSUPP; 1781 1782 target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1); 1783 pfvf = rvu_get_pfvf(rvu, target); 1784 1785 if (req->flags & RESET_VF_PERM) { 1786 pfvf->flags &= RVU_CLEAR_VF_PERM; 1787 } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^ 1788 (req->flags & VF_TRUSTED)) { 1789 change_bit(PF_SET_VF_TRUSTED, &pfvf->flags); 1790 /* disable multicast and promisc entries */ 1791 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) { 1792 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target); 1793 if (blkaddr < 0) 1794 return 0; 1795 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 1796 target, 0); 1797 if (nixlf < 0) 1798 return 0; 1799 npc_enadis_default_mce_entry(rvu, target, nixlf, 1800 NIXLF_ALLMULTI_ENTRY, 1801 false); 1802 npc_enadis_default_mce_entry(rvu, target, nixlf, 1803 NIXLF_PROMISC_ENTRY, 1804 false); 1805 } 1806 } 1807 1808 return 0; 1809 } 1810 1811 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, 1812 struct mbox_msghdr *req) 1813 { 
1814 struct rvu *rvu = pci_get_drvdata(mbox->pdev); 1815 1816 /* Check if valid, if not reply with a invalid msg */ 1817 if (req->sig != OTX2_MBOX_REQ_SIG) 1818 goto bad_message; 1819 1820 switch (req->id) { 1821 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 1822 case _id: { \ 1823 struct _rsp_type *rsp; \ 1824 int err; \ 1825 \ 1826 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ 1827 mbox, devid, \ 1828 sizeof(struct _rsp_type)); \ 1829 /* some handlers should complete even if reply */ \ 1830 /* could not be allocated */ \ 1831 if (!rsp && \ 1832 _id != MBOX_MSG_DETACH_RESOURCES && \ 1833 _id != MBOX_MSG_NIX_TXSCH_FREE && \ 1834 _id != MBOX_MSG_VF_FLR) \ 1835 return -ENOMEM; \ 1836 if (rsp) { \ 1837 rsp->hdr.id = _id; \ 1838 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ 1839 rsp->hdr.pcifunc = req->pcifunc; \ 1840 rsp->hdr.rc = 0; \ 1841 } \ 1842 \ 1843 err = rvu_mbox_handler_ ## _fn_name(rvu, \ 1844 (struct _req_type *)req, \ 1845 rsp); \ 1846 if (rsp && err) \ 1847 rsp->hdr.rc = err; \ 1848 \ 1849 trace_otx2_msg_process(mbox->pdev, _id, err); \ 1850 return rsp ? err : -ENOMEM; \ 1851 } 1852 MBOX_MESSAGES 1853 #undef M 1854 1855 bad_message: 1856 default: 1857 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); 1858 return -ENODEV; 1859 } 1860 } 1861 1862 static void __rvu_mbox_handler(struct rvu_work *mwork, int type) 1863 { 1864 struct rvu *rvu = mwork->rvu; 1865 int offset, err, id, devid; 1866 struct otx2_mbox_dev *mdev; 1867 struct mbox_hdr *req_hdr; 1868 struct mbox_msghdr *msg; 1869 struct mbox_wq_info *mw; 1870 struct otx2_mbox *mbox; 1871 1872 switch (type) { 1873 case TYPE_AFPF: 1874 mw = &rvu->afpf_wq_info; 1875 break; 1876 case TYPE_AFVF: 1877 mw = &rvu->afvf_wq_info; 1878 break; 1879 default: 1880 return; 1881 } 1882 1883 devid = mwork - mw->mbox_wrk; 1884 mbox = &mw->mbox; 1885 mdev = &mbox->dev[devid]; 1886 1887 /* Process received mbox messages */ 1888 req_hdr = mdev->mbase + mbox->rx_start; 1889 if (mw->mbox_wrk[devid].num_msgs == 0) 1890 return; 1891 1892 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); 1893 1894 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { 1895 msg = mdev->mbase + offset; 1896 1897 /* Set which PF/VF sent this message based on mbox IRQ */ 1898 switch (type) { 1899 case TYPE_AFPF: 1900 msg->pcifunc &= 1901 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); 1902 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); 1903 break; 1904 case TYPE_AFVF: 1905 msg->pcifunc &= 1906 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); 1907 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; 1908 break; 1909 } 1910 1911 err = rvu_process_mbox_msg(mbox, devid, msg); 1912 if (!err) { 1913 offset = mbox->rx_start + msg->next_msgoff; 1914 continue; 1915 } 1916 1917 if (msg->pcifunc & RVU_PFVF_FUNC_MASK) 1918 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", 1919 err, otx2_mbox_id2name(msg->id), 1920 msg->id, rvu_get_pf(msg->pcifunc), 1921 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); 1922 else 1923 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", 1924 err, otx2_mbox_id2name(msg->id), 1925 msg->id, devid); 1926 } 1927 mw->mbox_wrk[devid].num_msgs = 0; 1928 1929 /* Send mbox responses to VF/PF */ 1930 otx2_mbox_msg_send(mbox, devid); 1931 } 1932 1933 static inline void rvu_afpf_mbox_handler(struct work_struct *work) 1934 { 1935 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1936 1937 __rvu_mbox_handler(mwork, TYPE_AFPF); 1938 } 1939 1940 static inline void 
rvu_afvf_mbox_handler(struct work_struct *work) 1941 { 1942 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 1943 1944 __rvu_mbox_handler(mwork, TYPE_AFVF); 1945 } 1946 1947 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) 1948 { 1949 struct rvu *rvu = mwork->rvu; 1950 struct otx2_mbox_dev *mdev; 1951 struct mbox_hdr *rsp_hdr; 1952 struct mbox_msghdr *msg; 1953 struct mbox_wq_info *mw; 1954 struct otx2_mbox *mbox; 1955 int offset, id, devid; 1956 1957 switch (type) { 1958 case TYPE_AFPF: 1959 mw = &rvu->afpf_wq_info; 1960 break; 1961 case TYPE_AFVF: 1962 mw = &rvu->afvf_wq_info; 1963 break; 1964 default: 1965 return; 1966 } 1967 1968 devid = mwork - mw->mbox_wrk_up; 1969 mbox = &mw->mbox_up; 1970 mdev = &mbox->dev[devid]; 1971 1972 rsp_hdr = mdev->mbase + mbox->rx_start; 1973 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { 1974 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); 1975 return; 1976 } 1977 1978 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 1979 1980 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { 1981 msg = mdev->mbase + offset; 1982 1983 if (msg->id >= MBOX_MSG_MAX) { 1984 dev_err(rvu->dev, 1985 "Mbox msg with unknown ID 0x%x\n", msg->id); 1986 goto end; 1987 } 1988 1989 if (msg->sig != OTX2_MBOX_RSP_SIG) { 1990 dev_err(rvu->dev, 1991 "Mbox msg with wrong signature %x, ID 0x%x\n", 1992 msg->sig, msg->id); 1993 goto end; 1994 } 1995 1996 switch (msg->id) { 1997 case MBOX_MSG_CGX_LINK_EVENT: 1998 break; 1999 default: 2000 if (msg->rc) 2001 dev_err(rvu->dev, 2002 "Mbox msg response has err %d, ID 0x%x\n", 2003 msg->rc, msg->id); 2004 break; 2005 } 2006 end: 2007 offset = mbox->rx_start + msg->next_msgoff; 2008 mdev->msgs_acked++; 2009 } 2010 mw->mbox_wrk_up[devid].up_num_msgs = 0; 2011 2012 otx2_mbox_reset(mbox, devid); 2013 } 2014 2015 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) 2016 { 2017 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 2018 2019 __rvu_mbox_up_handler(mwork, TYPE_AFPF); 2020 } 2021 2022 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) 2023 { 2024 struct rvu_work *mwork = container_of(work, struct rvu_work, work); 2025 2026 __rvu_mbox_up_handler(mwork, TYPE_AFVF); 2027 } 2028 2029 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, 2030 int num, int type) 2031 { 2032 struct rvu_hwinfo *hw = rvu->hw; 2033 int region; 2034 u64 bar4; 2035 2036 /* For cn10k platform VF mailbox regions of a PF follows after the 2037 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from 2038 * RVU_PF_VF_BAR4_ADDR register. 2039 */ 2040 if (type == TYPE_AFVF) { 2041 for (region = 0; region < num; region++) { 2042 if (hw->cap.per_pf_mbox_regs) { 2043 bar4 = rvu_read64(rvu, BLKADDR_RVUM, 2044 RVU_AF_PFX_BAR4_ADDR(0)) + 2045 MBOX_SIZE; 2046 bar4 += region * MBOX_SIZE; 2047 } else { 2048 bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); 2049 bar4 += region * MBOX_SIZE; 2050 } 2051 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); 2052 if (!mbox_addr[region]) 2053 goto error; 2054 } 2055 return 0; 2056 } 2057 2058 /* For cn10k platform AF <-> PF mailbox region of a PF is read from per 2059 * PF registers. Whereas for Octeontx2 it is read from 2060 * RVU_AF_PF_BAR4_ADDR register. 
2061 */ 2062 for (region = 0; region < num; region++) { 2063 if (hw->cap.per_pf_mbox_regs) { 2064 bar4 = rvu_read64(rvu, BLKADDR_RVUM, 2065 RVU_AF_PFX_BAR4_ADDR(region)); 2066 } else { 2067 bar4 = rvu_read64(rvu, BLKADDR_RVUM, 2068 RVU_AF_PF_BAR4_ADDR); 2069 bar4 += region * MBOX_SIZE; 2070 } 2071 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); 2072 if (!mbox_addr[region]) 2073 goto error; 2074 } 2075 return 0; 2076 2077 error: 2078 while (region--) 2079 iounmap((void __iomem *)mbox_addr[region]); 2080 return -ENOMEM; 2081 } 2082 2083 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 2084 int type, int num, 2085 void (mbox_handler)(struct work_struct *), 2086 void (mbox_up_handler)(struct work_struct *)) 2087 { 2088 int err = -EINVAL, i, dir, dir_up; 2089 void __iomem *reg_base; 2090 struct rvu_work *mwork; 2091 void **mbox_regions; 2092 const char *name; 2093 2094 mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); 2095 if (!mbox_regions) 2096 return -ENOMEM; 2097 2098 switch (type) { 2099 case TYPE_AFPF: 2100 name = "rvu_afpf_mailbox"; 2101 dir = MBOX_DIR_AFPF; 2102 dir_up = MBOX_DIR_AFPF_UP; 2103 reg_base = rvu->afreg_base; 2104 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF); 2105 if (err) 2106 goto free_regions; 2107 break; 2108 case TYPE_AFVF: 2109 name = "rvu_afvf_mailbox"; 2110 dir = MBOX_DIR_PFVF; 2111 dir_up = MBOX_DIR_PFVF_UP; 2112 reg_base = rvu->pfreg_base; 2113 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF); 2114 if (err) 2115 goto free_regions; 2116 break; 2117 default: 2118 return err; 2119 } 2120 2121 mw->mbox_wq = alloc_workqueue(name, 2122 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 2123 num); 2124 if (!mw->mbox_wq) { 2125 err = -ENOMEM; 2126 goto unmap_regions; 2127 } 2128 2129 mw->mbox_wrk = devm_kcalloc(rvu->dev, num, 2130 sizeof(struct rvu_work), GFP_KERNEL); 2131 if (!mw->mbox_wrk) { 2132 err = -ENOMEM; 2133 goto exit; 2134 } 2135 2136 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, 2137 sizeof(struct rvu_work), GFP_KERNEL); 2138 if (!mw->mbox_wrk_up) { 2139 err = -ENOMEM; 2140 goto exit; 2141 } 2142 2143 err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev, 2144 reg_base, dir, num); 2145 if (err) 2146 goto exit; 2147 2148 err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev, 2149 reg_base, dir_up, num); 2150 if (err) 2151 goto exit; 2152 2153 for (i = 0; i < num; i++) { 2154 mwork = &mw->mbox_wrk[i]; 2155 mwork->rvu = rvu; 2156 INIT_WORK(&mwork->work, mbox_handler); 2157 2158 mwork = &mw->mbox_wrk_up[i]; 2159 mwork->rvu = rvu; 2160 INIT_WORK(&mwork->work, mbox_up_handler); 2161 } 2162 kfree(mbox_regions); 2163 return 0; 2164 2165 exit: 2166 destroy_workqueue(mw->mbox_wq); 2167 unmap_regions: 2168 while (num--) 2169 iounmap((void __iomem *)mbox_regions[num]); 2170 free_regions: 2171 kfree(mbox_regions); 2172 return err; 2173 } 2174 2175 static void rvu_mbox_destroy(struct mbox_wq_info *mw) 2176 { 2177 struct otx2_mbox *mbox = &mw->mbox; 2178 struct otx2_mbox_dev *mdev; 2179 int devid; 2180 2181 if (mw->mbox_wq) { 2182 flush_workqueue(mw->mbox_wq); 2183 destroy_workqueue(mw->mbox_wq); 2184 mw->mbox_wq = NULL; 2185 } 2186 2187 for (devid = 0; devid < mbox->ndevs; devid++) { 2188 mdev = &mbox->dev[devid]; 2189 if (mdev->hwbase) 2190 iounmap((void __iomem *)mdev->hwbase); 2191 } 2192 2193 otx2_mbox_destroy(&mw->mbox); 2194 otx2_mbox_destroy(&mw->mbox_up); 2195 } 2196 2197 static void rvu_queue_work(struct mbox_wq_info *mw, int first, 2198 int mdevs, u64 intr) 2199 { 2200 struct 
otx2_mbox_dev *mdev; 2201 struct otx2_mbox *mbox; 2202 struct mbox_hdr *hdr; 2203 int i; 2204 2205 for (i = first; i < mdevs; i++) { 2206 /* Bit 0 of 'intr' corresponds to device 'first' */ 2207 if (!(intr & BIT_ULL(i - first))) 2208 continue; 2209 2210 mbox = &mw->mbox; 2211 mdev = &mbox->dev[i]; 2212 hdr = mdev->mbase + mbox->rx_start; 2213 2214 /* The hdr->num_msgs is cleared here, right after being read, so that 2215 * it holds a correct value the next time the interrupt handler runs. 2216 * mw->mbox_wrk[i].num_msgs carries the count of pending requests for 2217 * the mbox handler work item, and mw->mbox_wrk_up[i].up_num_msgs 2218 * carries the count of pending responses for the mbox up handler 2219 * work item. 2220 */ 2221 2222 if (hdr->num_msgs) { 2223 mw->mbox_wrk[i].num_msgs = hdr->num_msgs; 2224 hdr->num_msgs = 0; 2225 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); 2226 } 2227 mbox = &mw->mbox_up; 2228 mdev = &mbox->dev[i]; 2229 hdr = mdev->mbase + mbox->rx_start; 2230 if (hdr->num_msgs) { 2231 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; 2232 hdr->num_msgs = 0; 2233 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); 2234 } 2235 } 2236 } 2237 2238 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) 2239 { 2240 struct rvu *rvu = (struct rvu *)rvu_irq; 2241 int vfs = rvu->vfs; 2242 u64 intr; 2243 2244 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); 2245 /* Clear interrupts */ 2246 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); 2247 if (intr) 2248 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); 2249 2250 /* Sync with mbox memory region */ 2251 rmb(); 2252 2253 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); 2254 2255 /* Handle VF interrupts */ 2256 if (vfs > 64) { 2257 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); 2258 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); 2259 2260 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); 2261 vfs -= 64; 2262 } 2263 2264 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); 2265 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); 2266 if (intr) 2267 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); 2268 2269 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); 2270 2271 return IRQ_HANDLED; 2272 } 2273 2274 static void rvu_enable_mbox_intr(struct rvu *rvu) 2275 { 2276 struct rvu_hwinfo *hw = rvu->hw; 2277 2278 /* Clear spurious irqs, if any */ 2279 rvu_write64(rvu, BLKADDR_RVUM, 2280 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); 2281 2282 /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */ 2283 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, 2284 INTR_MASK(hw->total_pfs) & ~1ULL); 2285 } 2286 2287 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) 2288 { 2289 struct rvu_block *block; 2290 int slot, lf, num_lfs; 2291 int err; 2292 2293 block = &rvu->hw->block[blkaddr]; 2294 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), 2295 block->addr); 2296 if (!num_lfs) 2297 return; 2298 for (slot = 0; slot < num_lfs; slot++) { 2299 lf = rvu_get_lf(rvu, block, pcifunc, slot); 2300 if (lf < 0) 2301 continue; 2302 2303 /* Cleanup LF and reset it */ 2304 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1) 2305 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); 2306 else if (block->addr == BLKADDR_NPA) 2307 rvu_npa_lf_teardown(rvu, pcifunc, lf); 2308 else if ((block->addr == BLKADDR_CPT0) || 2309 (block->addr == BLKADDR_CPT1)) 2310 rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot); 2311 2312 err = rvu_lf_reset(rvu, block, lf); 2313 if (err) { 2314 dev_err(rvu->dev, "Failed to reset blkaddr %d 
LF%d\n", 2315 block->addr, lf); 2316 } 2317 } 2318 } 2319 2320 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) 2321 { 2322 mutex_lock(&rvu->flr_lock); 2323 /* Reset order should reflect inter-block dependencies: 2324 * 1. Reset any packet/work sources (NIX, CPT, TIM) 2325 * 2. Flush and reset SSO/SSOW 2326 * 3. Cleanup pools (NPA) 2327 */ 2328 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); 2329 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1); 2330 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); 2331 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1); 2332 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); 2333 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); 2334 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); 2335 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); 2336 rvu_reset_lmt_map_tbl(rvu, pcifunc); 2337 rvu_detach_rsrcs(rvu, NULL, pcifunc); 2338 mutex_unlock(&rvu->flr_lock); 2339 } 2340 2341 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) 2342 { 2343 int reg = 0; 2344 2345 /* pcifunc = 0(PF0) | (vf + 1) */ 2346 __rvu_flr_handler(rvu, vf + 1); 2347 2348 if (vf >= 64) { 2349 reg = 1; 2350 vf = vf - 64; 2351 } 2352 2353 /* Signal FLR finish and enable IRQ */ 2354 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); 2355 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); 2356 } 2357 2358 static void rvu_flr_handler(struct work_struct *work) 2359 { 2360 struct rvu_work *flrwork = container_of(work, struct rvu_work, work); 2361 struct rvu *rvu = flrwork->rvu; 2362 u16 pcifunc, numvfs, vf; 2363 u64 cfg; 2364 int pf; 2365 2366 pf = flrwork - rvu->flr_wrk; 2367 if (pf >= rvu->hw->total_pfs) { 2368 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); 2369 return; 2370 } 2371 2372 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2373 numvfs = (cfg >> 12) & 0xFF; 2374 pcifunc = pf << RVU_PFVF_PF_SHIFT; 2375 2376 for (vf = 0; vf < numvfs; vf++) 2377 __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); 2378 2379 __rvu_flr_handler(rvu, pcifunc); 2380 2381 /* Signal FLR finish */ 2382 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); 2383 2384 /* Enable interrupt */ 2385 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); 2386 } 2387 2388 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) 2389 { 2390 int dev, vf, reg = 0; 2391 u64 intr; 2392 2393 if (start_vf >= 64) 2394 reg = 1; 2395 2396 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); 2397 if (!intr) 2398 return; 2399 2400 for (vf = 0; vf < numvfs; vf++) { 2401 if (!(intr & BIT_ULL(vf))) 2402 continue; 2403 dev = vf + start_vf + rvu->hw->total_pfs; 2404 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); 2405 /* Clear and disable the interrupt */ 2406 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); 2407 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); 2408 } 2409 } 2410 2411 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) 2412 { 2413 struct rvu *rvu = (struct rvu *)rvu_irq; 2414 u64 intr; 2415 u8 pf; 2416 2417 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); 2418 if (!intr) 2419 goto afvf_flr; 2420 2421 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2422 if (intr & (1ULL << pf)) { 2423 /* PF is already dead do only AF related operations */ 2424 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); 2425 /* clear interrupt */ 2426 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, 2427 BIT_ULL(pf)); 2428 /* Disable the interrupt */ 2429 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, 2430 BIT_ULL(pf)); 2431 } 2432 
} 2433 2434 afvf_flr: 2435 rvu_afvf_queue_flr_work(rvu, 0, 64); 2436 if (rvu->vfs > 64) 2437 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); 2438 2439 return IRQ_HANDLED; 2440 } 2441 2442 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) 2443 { 2444 int vf; 2445 2446 /* Nothing to be done here other than clearing the 2447 * TRPEND bit. 2448 */ 2449 for (vf = 0; vf < 64; vf++) { 2450 if (intr & (1ULL << vf)) { 2451 /* clear the trpend due to ME (master enable) */ 2452 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); 2453 /* clear interrupt */ 2454 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); 2455 } 2456 } 2457 } 2458 2459 /* Handles ME interrupts from VFs of AF */ 2460 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq) 2461 { 2462 struct rvu *rvu = (struct rvu *)rvu_irq; 2463 int vfset; 2464 u64 intr; 2465 2466 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); 2467 2468 for (vfset = 0; vfset <= 1; vfset++) { 2469 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); 2470 if (intr) 2471 rvu_me_handle_vfset(rvu, vfset, intr); 2472 } 2473 2474 return IRQ_HANDLED; 2475 } 2476 2477 /* Handles ME interrupts from PFs */ 2478 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq) 2479 { 2480 struct rvu *rvu = (struct rvu *)rvu_irq; 2481 u64 intr; 2482 u8 pf; 2483 2484 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); 2485 2486 /* Nothing to be done here other than clearing the 2487 * TRPEND bit. 2488 */ 2489 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2490 if (intr & (1ULL << pf)) { 2491 /* clear the trpend due to ME (master enable) */ 2492 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, 2493 BIT_ULL(pf)); 2494 /* clear interrupt */ 2495 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, 2496 BIT_ULL(pf)); 2497 } 2498 } 2499 2500 return IRQ_HANDLED; 2501 } 2502 2503 static void rvu_unregister_interrupts(struct rvu *rvu) 2504 { 2505 int irq; 2506 2507 /* Disable the Mbox interrupt */ 2508 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, 2509 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2510 2511 /* Disable the PF FLR interrupt */ 2512 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, 2513 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2514 2515 /* Disable the PF ME interrupt */ 2516 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, 2517 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2518 2519 for (irq = 0; irq < rvu->num_vec; irq++) { 2520 if (rvu->irq_allocated[irq]) { 2521 free_irq(pci_irq_vector(rvu->pdev, irq), rvu); 2522 rvu->irq_allocated[irq] = false; 2523 } 2524 } 2525 2526 pci_free_irq_vectors(rvu->pdev); 2527 rvu->num_vec = 0; 2528 } 2529 2530 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) 2531 { 2532 struct rvu_pfvf *pfvf; 2533 int offset; 2534 2535 pfvf = &rvu->pf[0]; 2536 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; 2537 2538 /* Make sure there are enough MSIX vectors configured so that 2539 * VF interrupts can be handled. An offset of zero means that the 2540 * PF vectors are not configured and would overlap the AF vectors.
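 * Both rvu_register_interrupts() and rvu_enable_sriov() check this, since
 * the AF-VF mbox/FLR/ME vectors requested further down live in the PF
 * vector range.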
2541 */ 2542 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && 2543 offset; 2544 } 2545 2546 static int rvu_register_interrupts(struct rvu *rvu) 2547 { 2548 int ret, offset, pf_vec_start; 2549 2550 rvu->num_vec = pci_msix_vec_count(rvu->pdev); 2551 2552 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, 2553 NAME_SIZE, GFP_KERNEL); 2554 if (!rvu->irq_name) 2555 return -ENOMEM; 2556 2557 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, 2558 sizeof(bool), GFP_KERNEL); 2559 if (!rvu->irq_allocated) 2560 return -ENOMEM; 2561 2562 /* Enable MSI-X */ 2563 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, 2564 rvu->num_vec, PCI_IRQ_MSIX); 2565 if (ret < 0) { 2566 dev_err(rvu->dev, 2567 "RVUAF: Request for %d msix vectors failed, ret %d\n", 2568 rvu->num_vec, ret); 2569 return ret; 2570 } 2571 2572 /* Register mailbox interrupt handler */ 2573 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); 2574 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), 2575 rvu_mbox_intr_handler, 0, 2576 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); 2577 if (ret) { 2578 dev_err(rvu->dev, 2579 "RVUAF: IRQ registration failed for mbox irq\n"); 2580 goto fail; 2581 } 2582 2583 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; 2584 2585 /* Enable mailbox interrupts from all PFs */ 2586 rvu_enable_mbox_intr(rvu); 2587 2588 /* Register FLR interrupt handler */ 2589 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], 2590 "RVUAF FLR"); 2591 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), 2592 rvu_flr_intr_handler, 0, 2593 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], 2594 rvu); 2595 if (ret) { 2596 dev_err(rvu->dev, 2597 "RVUAF: IRQ registration failed for FLR\n"); 2598 goto fail; 2599 } 2600 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; 2601 2602 /* Enable FLR interrupt for all PFs*/ 2603 rvu_write64(rvu, BLKADDR_RVUM, 2604 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); 2605 2606 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, 2607 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2608 2609 /* Register ME interrupt handler */ 2610 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], 2611 "RVUAF ME"); 2612 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), 2613 rvu_me_pf_intr_handler, 0, 2614 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], 2615 rvu); 2616 if (ret) { 2617 dev_err(rvu->dev, 2618 "RVUAF: IRQ registration failed for ME\n"); 2619 } 2620 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; 2621 2622 /* Clear TRPEND bit for all PF */ 2623 rvu_write64(rvu, BLKADDR_RVUM, 2624 RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs)); 2625 /* Enable ME interrupt for all PFs*/ 2626 rvu_write64(rvu, BLKADDR_RVUM, 2627 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); 2628 2629 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, 2630 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2631 2632 if (!rvu_afvf_msix_vectors_num_ok(rvu)) 2633 return 0; 2634 2635 /* Get PF MSIX vectors offset. */ 2636 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, 2637 RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; 2638 2639 /* Register MBOX0 interrupt. 
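 * MBOX0 carries mailbox interrupts from the first 64 of the AF's VFs; any
 * remaining VFs raise MBOX1, which is registered just below.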
*/ 2640 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; 2641 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); 2642 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2643 rvu_mbox_intr_handler, 0, 2644 &rvu->irq_name[offset * NAME_SIZE], 2645 rvu); 2646 if (ret) 2647 dev_err(rvu->dev, 2648 "RVUAF: IRQ registration failed for Mbox0\n"); 2649 2650 rvu->irq_allocated[offset] = true; 2651 2652 /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so 2653 * simply increment current offset by 1. 2654 */ 2655 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; 2656 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); 2657 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2658 rvu_mbox_intr_handler, 0, 2659 &rvu->irq_name[offset * NAME_SIZE], 2660 rvu); 2661 if (ret) 2662 dev_err(rvu->dev, 2663 "RVUAF: IRQ registration failed for Mbox1\n"); 2664 2665 rvu->irq_allocated[offset] = true; 2666 2667 /* Register FLR interrupt handler for AF's VFs */ 2668 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; 2669 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); 2670 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2671 rvu_flr_intr_handler, 0, 2672 &rvu->irq_name[offset * NAME_SIZE], rvu); 2673 if (ret) { 2674 dev_err(rvu->dev, 2675 "RVUAF: IRQ registration failed for RVUAFVF FLR0\n"); 2676 goto fail; 2677 } 2678 rvu->irq_allocated[offset] = true; 2679 2680 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1; 2681 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); 2682 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2683 rvu_flr_intr_handler, 0, 2684 &rvu->irq_name[offset * NAME_SIZE], rvu); 2685 if (ret) { 2686 dev_err(rvu->dev, 2687 "RVUAF: IRQ registration failed for RVUAFVF FLR1\n"); 2688 goto fail; 2689 } 2690 rvu->irq_allocated[offset] = true; 2691 2692 /* Register ME interrupt handler for AF's VFs */ 2693 offset = pf_vec_start + RVU_PF_INT_VEC_VFME0; 2694 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); 2695 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2696 rvu_me_vf_intr_handler, 0, 2697 &rvu->irq_name[offset * NAME_SIZE], rvu); 2698 if (ret) { 2699 dev_err(rvu->dev, 2700 "RVUAF: IRQ registration failed for RVUAFVF ME0\n"); 2701 goto fail; 2702 } 2703 rvu->irq_allocated[offset] = true; 2704 2705 offset = pf_vec_start + RVU_PF_INT_VEC_VFME1; 2706 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); 2707 ret = request_irq(pci_irq_vector(rvu->pdev, offset), 2708 rvu_me_vf_intr_handler, 0, 2709 &rvu->irq_name[offset * NAME_SIZE], rvu); 2710 if (ret) { 2711 dev_err(rvu->dev, 2712 "RVUAF: IRQ registration failed for RVUAFVF ME1\n"); 2713 goto fail; 2714 } 2715 rvu->irq_allocated[offset] = true; 2716 return 0; 2717 2718 fail: 2719 rvu_unregister_interrupts(rvu); 2720 return ret; 2721 } 2722 2723 static void rvu_flr_wq_destroy(struct rvu *rvu) 2724 { 2725 if (rvu->flr_wq) { 2726 flush_workqueue(rvu->flr_wq); 2727 destroy_workqueue(rvu->flr_wq); 2728 rvu->flr_wq = NULL; 2729 } 2730 } 2731 2732 static int rvu_flr_init(struct rvu *rvu) 2733 { 2734 int dev, num_devs; 2735 u64 cfg; 2736 int pf; 2737 2738 /* Enable FLR for all PFs*/ 2739 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 2740 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2741 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), 2742 cfg | BIT_ULL(22)); 2743 } 2744 2745 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", 2746 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 2747 1); 2748 if (!rvu->flr_wq) 2749 return -ENOMEM; 2750 2751 num_devs = rvu->hw->total_pfs + 
pci_sriov_get_totalvfs(rvu->pdev); 2752 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, 2753 sizeof(struct rvu_work), GFP_KERNEL); 2754 if (!rvu->flr_wrk) { 2755 destroy_workqueue(rvu->flr_wq); 2756 return -ENOMEM; 2757 } 2758 2759 for (dev = 0; dev < num_devs; dev++) { 2760 rvu->flr_wrk[dev].rvu = rvu; 2761 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); 2762 } 2763 2764 mutex_init(&rvu->flr_lock); 2765 2766 return 0; 2767 } 2768 2769 static void rvu_disable_afvf_intr(struct rvu *rvu) 2770 { 2771 int vfs = rvu->vfs; 2772 2773 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2774 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2775 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); 2776 if (vfs <= 64) 2777 return; 2778 2779 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), 2780 INTR_MASK(vfs - 64)); 2781 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 2782 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 2783 } 2784 2785 static void rvu_enable_afvf_intr(struct rvu *rvu) 2786 { 2787 int vfs = rvu->vfs; 2788 2789 /* Clear any pending interrupts and enable AF VF interrupts for 2790 * the first 64 VFs. 2791 */ 2792 /* Mbox */ 2793 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); 2794 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2795 2796 /* FLR */ 2797 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); 2798 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2799 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); 2800 2801 /* Same for remaining VFs, if any. */ 2802 if (vfs <= 64) 2803 return; 2804 2805 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); 2806 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), 2807 INTR_MASK(vfs - 64)); 2808 2809 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); 2810 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2811 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2812 } 2813 2814 int rvu_get_num_lbk_chans(void) 2815 { 2816 struct pci_dev *pdev; 2817 void __iomem *base; 2818 int ret = -EIO; 2819 2820 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, 2821 NULL); 2822 if (!pdev) 2823 goto err; 2824 2825 base = pci_ioremap_bar(pdev, 0); 2826 if (!base) 2827 goto err_put; 2828 2829 /* Read number of available LBK channels from LBK(0)_CONST register. */ 2830 ret = (readq(base + 0x10) >> 32) & 0xffff; 2831 iounmap(base); 2832 err_put: 2833 pci_dev_put(pdev); 2834 err: 2835 return ret; 2836 } 2837 2838 static int rvu_enable_sriov(struct rvu *rvu) 2839 { 2840 struct pci_dev *pdev = rvu->pdev; 2841 int err, chans, vfs; 2842 2843 if (!rvu_afvf_msix_vectors_num_ok(rvu)) { 2844 dev_warn(&pdev->dev, 2845 "Skipping SRIOV enablement since not enough IRQs are available\n"); 2846 return 0; 2847 } 2848 2849 chans = rvu_get_num_lbk_chans(); 2850 if (chans < 0) 2851 return chans; 2852 2853 vfs = pci_sriov_get_totalvfs(pdev); 2854 2855 /* Limit VFs in case we have more VFs than LBK channels available. */ 2856 if (vfs > chans) 2857 vfs = chans; 2858 2859 if (!vfs) 2860 return 0; 2861 2862 /* LBK channel number 63 is used for switching packets between 2863 * CGX mapped VFs. Hence limit LBK pairs till 62 only. 2864 */ 2865 if (vfs > 62) 2866 vfs = 62; 2867 2868 /* Save VFs number for reference in VF interrupts handlers. 
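 * rvu_mbox_intr_handler() and rvu_flr_intr_handler() consult rvu->vfs to
 * decide whether the second set of interrupt registers (VFs 64-127) also
 * needs servicing.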
2869 * Since interrupts might start arriving during SRIOV enablement 2870 * ordinary API cannot be used to get number of enabled VFs. 2871 */ 2872 rvu->vfs = vfs; 2873 2874 err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, 2875 rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler); 2876 if (err) 2877 return err; 2878 2879 rvu_enable_afvf_intr(rvu); 2880 /* Make sure IRQs are enabled before SRIOV. */ 2881 mb(); 2882 2883 err = pci_enable_sriov(pdev, vfs); 2884 if (err) { 2885 rvu_disable_afvf_intr(rvu); 2886 rvu_mbox_destroy(&rvu->afvf_wq_info); 2887 return err; 2888 } 2889 2890 return 0; 2891 } 2892 2893 static void rvu_disable_sriov(struct rvu *rvu) 2894 { 2895 rvu_disable_afvf_intr(rvu); 2896 rvu_mbox_destroy(&rvu->afvf_wq_info); 2897 pci_disable_sriov(rvu->pdev); 2898 } 2899 2900 static void rvu_update_module_params(struct rvu *rvu) 2901 { 2902 const char *default_pfl_name = "default"; 2903 2904 strscpy(rvu->mkex_pfl_name, 2905 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN); 2906 strscpy(rvu->kpu_pfl_name, 2907 kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN); 2908 } 2909 2910 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2911 { 2912 struct device *dev = &pdev->dev; 2913 struct rvu *rvu; 2914 int err; 2915 2916 rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); 2917 if (!rvu) 2918 return -ENOMEM; 2919 2920 rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); 2921 if (!rvu->hw) { 2922 devm_kfree(dev, rvu); 2923 return -ENOMEM; 2924 } 2925 2926 pci_set_drvdata(pdev, rvu); 2927 rvu->pdev = pdev; 2928 rvu->dev = &pdev->dev; 2929 2930 err = pci_enable_device(pdev); 2931 if (err) { 2932 dev_err(dev, "Failed to enable PCI device\n"); 2933 goto err_freemem; 2934 } 2935 2936 err = pci_request_regions(pdev, DRV_NAME); 2937 if (err) { 2938 dev_err(dev, "PCI request regions failed 0x%x\n", err); 2939 goto err_disable_device; 2940 } 2941 2942 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 2943 if (err) { 2944 dev_err(dev, "DMA mask config failed, abort\n"); 2945 goto err_release_regions; 2946 } 2947 2948 pci_set_master(pdev); 2949 2950 rvu->ptp = ptp_get(); 2951 if (IS_ERR(rvu->ptp)) { 2952 err = PTR_ERR(rvu->ptp); 2953 if (err == -EPROBE_DEFER) 2954 goto err_release_regions; 2955 rvu->ptp = NULL; 2956 } 2957 2958 /* Map Admin function CSRs */ 2959 rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); 2960 rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); 2961 if (!rvu->afreg_base || !rvu->pfreg_base) { 2962 dev_err(dev, "Unable to map admin function CSRs, aborting\n"); 2963 err = -ENOMEM; 2964 goto err_put_ptp; 2965 } 2966 2967 /* Store module params in rvu structure */ 2968 rvu_update_module_params(rvu); 2969 2970 /* Check which blocks the HW supports */ 2971 rvu_check_block_implemented(rvu); 2972 2973 rvu_reset_all_blocks(rvu); 2974 2975 rvu_setup_hw_capabilities(rvu); 2976 2977 err = rvu_setup_hw_resources(rvu); 2978 if (err) 2979 goto err_put_ptp; 2980 2981 /* Init mailbox btw AF and PFs */ 2982 err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, 2983 rvu->hw->total_pfs, rvu_afpf_mbox_handler, 2984 rvu_afpf_mbox_up_handler); 2985 if (err) 2986 goto err_hwsetup; 2987 2988 err = rvu_flr_init(rvu); 2989 if (err) 2990 goto err_mbox; 2991 2992 err = rvu_register_interrupts(rvu); 2993 if (err) 2994 goto err_flr; 2995 2996 err = rvu_register_dl(rvu); 2997 if (err) 2998 goto err_irq; 2999 3000 rvu_setup_rvum_blk_revid(rvu); 3001 3002 /* Enable AF's VFs (if any) */ 3003 err = rvu_enable_sriov(rvu); 
3004 if (err) 3005 goto err_dl; 3006 3007 /* Initialize debugfs */ 3008 rvu_dbg_init(rvu); 3009 3010 mutex_init(&rvu->rswitch.switch_lock); 3011 3012 return 0; 3013 err_dl: 3014 rvu_unregister_dl(rvu); 3015 err_irq: 3016 rvu_unregister_interrupts(rvu); 3017 err_flr: 3018 rvu_flr_wq_destroy(rvu); 3019 err_mbox: 3020 rvu_mbox_destroy(&rvu->afpf_wq_info); 3021 err_hwsetup: 3022 rvu_cgx_exit(rvu); 3023 rvu_fwdata_exit(rvu); 3024 rvu_reset_all_blocks(rvu); 3025 rvu_free_hw_resources(rvu); 3026 rvu_clear_rvum_blk_revid(rvu); 3027 err_put_ptp: 3028 ptp_put(rvu->ptp); 3029 err_release_regions: 3030 pci_release_regions(pdev); 3031 err_disable_device: 3032 pci_disable_device(pdev); 3033 err_freemem: 3034 pci_set_drvdata(pdev, NULL); 3035 devm_kfree(&pdev->dev, rvu->hw); 3036 devm_kfree(dev, rvu); 3037 return err; 3038 } 3039 3040 static void rvu_remove(struct pci_dev *pdev) 3041 { 3042 struct rvu *rvu = pci_get_drvdata(pdev); 3043 3044 rvu_dbg_exit(rvu); 3045 rvu_unregister_dl(rvu); 3046 rvu_unregister_interrupts(rvu); 3047 rvu_flr_wq_destroy(rvu); 3048 rvu_cgx_exit(rvu); 3049 rvu_fwdata_exit(rvu); 3050 rvu_mbox_destroy(&rvu->afpf_wq_info); 3051 rvu_disable_sriov(rvu); 3052 rvu_reset_all_blocks(rvu); 3053 rvu_free_hw_resources(rvu); 3054 rvu_clear_rvum_blk_revid(rvu); 3055 ptp_put(rvu->ptp); 3056 pci_release_regions(pdev); 3057 pci_disable_device(pdev); 3058 pci_set_drvdata(pdev, NULL); 3059 3060 devm_kfree(&pdev->dev, rvu->hw); 3061 devm_kfree(&pdev->dev, rvu); 3062 } 3063 3064 static struct pci_driver rvu_driver = { 3065 .name = DRV_NAME, 3066 .id_table = rvu_id_table, 3067 .probe = rvu_probe, 3068 .remove = rvu_remove, 3069 }; 3070 3071 static int __init rvu_init_module(void) 3072 { 3073 int err; 3074 3075 pr_info("%s: %s\n", DRV_NAME, DRV_STRING); 3076 3077 err = pci_register_driver(&cgx_driver); 3078 if (err < 0) 3079 return err; 3080 3081 err = pci_register_driver(&ptp_driver); 3082 if (err < 0) 3083 goto ptp_err; 3084 3085 err = pci_register_driver(&rvu_driver); 3086 if (err < 0) 3087 goto rvu_err; 3088 3089 return 0; 3090 rvu_err: 3091 pci_unregister_driver(&ptp_driver); 3092 ptp_err: 3093 pci_unregister_driver(&cgx_driver); 3094 3095 return err; 3096 } 3097 3098 static void __exit rvu_cleanup_module(void) 3099 { 3100 pci_unregister_driver(&rvu_driver); 3101 pci_unregister_driver(&ptp_driver); 3102 pci_unregister_driver(&cgx_driver); 3103 } 3104 3105 module_init(rvu_init_module); 3106 module_exit(rvu_cleanup_module); 3107
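/* An illustrative sketch, for reference only and not compiled into the driver:
 * roughly what a single M() entry of the MBOX_MESSAGES table expands to inside
 * the switch statement of rvu_process_mbox_msg() above. The READY message is
 * used as the example here; the authoritative ID and request/response type
 * pairs are the M() entries in mbox.h.
 *
 *	case MBOX_MSG_READY: {
 *		struct ready_msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct ready_msg_rsp *)otx2_mbox_alloc_msg(mbox, devid,
 *					sizeof(struct ready_msg_rsp));
 *		if (!rsp)
 *			return -ENOMEM;	(READY is not one of the IDs allowed
 *					 to proceed without a reply)
 *		rsp->hdr.id = MBOX_MSG_READY;
 *		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
 *		rsp->hdr.pcifunc = req->pcifunc;
 *		rsp->hdr.rc = 0;
 *
 *		err = rvu_mbox_handler_ready(rvu, (struct msg_req *)req, rsp);
 *		if (err)
 *			rsp->hdr.rc = err;
 *
 *		trace_otx2_msg_process(mbox->pdev, MBOX_MSG_READY, err);
 *		return err;
 *	}
 *
 * Handlers therefore only fill in the reply body; the common header set-up and
 * error propagation are factored into the macro.
 */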