// SPDX-License-Identifier: GPL-2.0-only
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "mbox.h"
#include "rvu.h"

/* CPT PF device id */
#define PCI_DEVID_OTX2_CPT_PF	 0xA0FD
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2

/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN	2ULL

#define cpt_get_eng_sts(e_min, e_max, rsp, etype)                   \
({                                                                  \
	u64 free_sts = 0, busy_sts = 0;                             \
	typeof(rsp) _rsp = rsp;                                     \
	u32 e, i;                                                   \
								    \
	for (e = (e_min), i = 0; e < (e_max); e++, i++) {           \
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
		if (reg & 0x1)                                      \
			busy_sts |= 1ULL << i;                      \
								    \
		if (reg & 0x2)                                      \
			free_sts |= 1ULL << i;                      \
	}                                                           \
	(_rsp)->busy_sts_##etype = busy_sts;                        \
	(_rsp)->free_sts_##etype = free_sts;                        \
})

static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg, val;
	int i, eng;
	u8 grp;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
	dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);

	i = -1;
	while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
		switch (vec) {
		case 0:
			eng = i;
			break;
		case 1:
			eng = i + 64;
			break;
		case 2:
			eng = i + 128;
			break;
		}
		grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
		/* Disable and enable the engine which triggers fault */
		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
		val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);

		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
	}
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);

	return IRQ_HANDLED;
}

static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
}

static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
}

static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
}

static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);

	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
	return IRQ_HANDLED;
}

static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);

	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
	return IRQ_HANDLED;
}

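/* Request one CPT AF interrupt vector and record the allocation in
 * rvu->irq_allocated so rvu_cpt_unregister_interrupts() can free it later.
 */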
static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
					 irq_handler_t handler,
					 const char *name)
{
	struct rvu *rvu = block->rvu;
	int ret;

	ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
			  name, block);
	if (ret) {
		dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
		return ret;
	}

	WARN_ON(rvu->irq_allocated[irq_offs]);
	rvu->irq_allocated[irq_offs] = true;
	return 0;
}

static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	int i;

	/* Disable all CPT AF interrupts */
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);

	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[off + i]) {
			free_irq(pci_irq_vector(rvu->pdev, off + i), block);
			rvu->irq_allocated[off + i] = false;
		}
}

static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int i, offs;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return;
	}
	block = &hw->block[blkaddr];
	if (!is_rvu_otx2(rvu))
		return cpt_10k_unregister_interrupts(block, offs);

	/* Disable all CPT AF interrupts */
	for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
			rvu->irq_allocated[offs + i] = false;
		}
}

void rvu_cpt_unregister_interrupts(struct rvu *rvu)
{
	cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
	cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
}

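/* CN10K CPT AF exposes three fault vectors (FLT0-FLT2) plus RVU and RAS
 * vectors. Register a handler for each vector and then enable the
 * corresponding interrupt enable bits.
 */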
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	irq_handler_t flt_fn;
	int i, ret;

	for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
		sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);

		switch (i) {
		case CPT_10K_AF_INT_VEC_FLT0:
			flt_fn = rvu_cpt_af_flt0_intr_handler;
			break;
		case CPT_10K_AF_INT_VEC_FLT1:
			flt_fn = rvu_cpt_af_flt1_intr_handler;
			break;
		case CPT_10K_AF_INT_VEC_FLT2:
			flt_fn = rvu_cpt_af_flt2_intr_handler;
			break;
		}
		ret = rvu_cpt_do_register_interrupt(block, off + i, flt_fn,
						    &rvu->irq_name[(off + i) * NAME_SIZE]);
		if (ret)
			goto err;
		if (i == CPT_10K_AF_INT_VEC_FLT2)
			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
		else
			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
	}

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}

static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	irq_handler_t flt_fn;
	int i, offs, ret = 0;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return 0;

	block = &hw->block[blkaddr];
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return 0;
	}

	if (!is_rvu_otx2(rvu))
		return cpt_10k_register_interrupts(block, offs);

	for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
		sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
		switch (i) {
		case CPT_AF_INT_VEC_FLT0:
			flt_fn = rvu_cpt_af_flt0_intr_handler;
			break;
		case CPT_AF_INT_VEC_FLT1:
			flt_fn = rvu_cpt_af_flt1_intr_handler;
			break;
		}
		ret = rvu_cpt_do_register_interrupt(block, offs + i, flt_fn,
						    &rvu->irq_name[(offs + i) * NAME_SIZE]);
		if (ret)
			goto err;
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
	}

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}

int rvu_cpt_register_interrupts(struct rvu *rvu)
{
	int ret;

	ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
	if (ret)
		return ret;

	return cpt_register_interrupts(rvu, BLKADDR_CPT1);
}

static int get_cpt_pf_num(struct rvu *rvu)
{
	int i, domain_nr, cpt_pf_num = -1;
	struct pci_dev *pdev;

	domain_nr = pci_domain_nr(rvu->pdev->bus);
	for (i = 0; i < rvu->hw->total_pfs; i++) {
		pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
		if (!pdev)
			continue;

		if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
		    pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
			cpt_pf_num = i;
			put_device(&pdev->dev);
			break;
		}
		put_device(&pdev->dev);
	}
	return cpt_pf_num;
}

static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);

	if (rvu_get_pf(pcifunc) != cpt_pf_num)
		return false;
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return false;

	return true;
}

static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);

	if (rvu_get_pf(pcifunc) != cpt_pf_num)
		return false;
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return false;

	return true;
}

static int validate_and_get_cpt_blkaddr(int req_blkaddr)
{
	int blkaddr;

	blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
	if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
		return -EINVAL;

	return blkaddr;
}

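/* Mailbox handler for CPT_LF_ALLOC: validates the engine group mask and the
 * requested CPTLF<=>NIXLF/SSOLF mappings, then programs CPT_AF_LFX_CTL and
 * CPT_AF_LFX_CTL2 for every CPT LF attached to the requesting PF/VF.
 */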
int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
				  struct cpt_lf_alloc_req_msg *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr;
	int num_lfs, slot;
	u64 val;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	if (req->eng_grpmsk == 0x0)
		return CPT_AF_ERR_GRP_INVALID;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return CPT_AF_ERR_LF_INVALID;

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (req->nix_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
			req->nix_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
			return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
	}

	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (req->sso_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
			req->sso_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
			return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
	}

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Set CPT LF group and priority */
		val = (u64)req->eng_grpmsk << 48 | 1;
		if (!is_rvu_otx2(rvu))
			val |= (CPT_CTX_ILEN << 17);

		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

		/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
		 * on reset.
		 */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
		val |= ((u64)req->nix_pf_func << 48 |
			(u64)req->sso_pf_func << 32);
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}

	return 0;
}

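/* Tear down and reset every CPT LF currently mapped to the requesting
 * PF/VF on the given block.
 */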
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
	u16 pcifunc = req->hdr.pcifunc;
	int num_lfs, cptlf, slot, err;
	struct rvu_block *block;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return 0;

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Perform teardown */
		rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);

		/* Reset LF */
		err = rvu_lf_reset(rvu, block, cptlf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, cptlf);
		}
	}

	return 0;
}

int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	int ret;

	ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
	if (ret)
		return ret;

	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);

	return ret;
}

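/* Configure a CPT LF for inline IPsec inbound processing: set the inbound
 * enable bit and NIX selection in CPT_AF_LFX_CTL, optionally program
 * SSO_PF_FUNC/NIX_PF_FUNC in CPT_AF_LFX_CTL2, and on CN10K program the X2P
 * link registers with the CPT base channel and channel range.
 */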
static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 sso_pf_func = req->sso_pf_func;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(16))) {
		/* IPSec inline outbound path is already enabled for a given
		 * CPT LF, HRM states that inline inbound & outbound paths
		 * must not be enabled at the same time for a given CPT LF
		 */
		return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
	}
	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
		return CPT_AF_ERR_SSO_PF_FUNC_INVALID;

	nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
	/* Enable CPT LF for IPsec inline inbound operations */
	if (req->enable)
		val |= BIT_ULL(9);
	else
		val &= ~BIT_ULL(9);

	val |= (u64)nix_sel << 8;
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (sso_pf_func) {
		/* Set SSO_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)sso_pf_func << 32;
		val |= (u64)req->nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}
	if (req->sso_pf_func_ovrd)
		/* Set SSO_PF_FUNC_OVRD for inline IPSec */
		rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);

	/* Configure the X2P Link register with the cpt base channel number and
	 * range of channels it should propagate to X2P
	 */
	if (!is_rvu_otx2(rvu)) {
		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
		val |= (u64)rvu->hw->cpt_chan_base;

		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
	}

	return 0;
}

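/* Configure a CPT LF for inline IPsec outbound processing: set the outbound
 * enable bit in CPT_AF_LFX_CTL and, when a NIX PF_FUNC is supplied, program
 * NIX_PF_FUNC in CPT_AF_LFX_CTL2 and select the NIX block the LF feeds.
 */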
static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					 struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 nix_pf_func = req->nix_pf_func;
	int nix_blkaddr;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(9))) {
		/* IPSec inline inbound path is already enabled for a given
		 * CPT LF, HRM states that inline inbound & outbound paths
		 * must not be enabled at the same time for a given CPT LF
		 */
		return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
	}

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
		return CPT_AF_ERR_NIX_PF_FUNC_INVALID;

	/* Enable CPT LF for IPsec inline outbound operations */
	if (req->enable)
		val |= BIT_ULL(16);
	else
		val &= ~BIT_ULL(16);
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (nix_pf_func) {
		/* Set NIX_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);

		nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
		nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;

		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
		val |= (u64)nix_sel << 8;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
	}

	return 0;
}

int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
					  struct cpt_inline_ipsec_cfg_msg *req,
					  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr, ret;
	u16 actual_slot;

	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
					    req->slot, &actual_slot);
	if (blkaddr < 0)
		return CPT_AF_ERR_LF_INVALID;

	block = &rvu->hw->block[blkaddr];

	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
	if (cptlf < 0)
		return CPT_AF_ERR_LF_INVALID;

	switch (req->dir) {
	case CPT_INLINE_INBOUND:
		ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
		break;

	case CPT_INLINE_OUTBOUND:
		ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
		break;

	default:
		return CPT_AF_ERR_PARAM;
	}

	return ret;
}

static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
	u64 offset = req->reg_offset;
	int blkaddr, num_lfs, lf;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return false;

	/* Registers that can be accessed from PF/VF */
	if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
	    (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
		if (offset & 7)
			return false;

		lf = (offset & 0xFFF) >> 3;
		block = &rvu->hw->block[blkaddr];
		pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
		num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		if (lf >= num_lfs)
			/* Slot is not valid for that PF/VF */
			return false;

		/* Translate local LF used by VFs to global CPT LF */
		lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
				req->hdr.pcifunc, lf);
		if (lf < 0)
			return false;

		return true;
	} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Registers that can be accessed from PF */
		switch (offset) {
		case CPT_AF_DIAG:
		case CPT_AF_CTL:
		case CPT_AF_PF_FUNC:
		case CPT_AF_BLK_RST:
		case CPT_AF_CONSTANTS1:
		case CPT_AF_CTX_FLUSH_TIMER:
			return true;
		}

		switch (offset & 0xFF000) {
		case CPT_AF_EXEX_STS(0):
		case CPT_AF_EXEX_CTL(0):
		case CPT_AF_EXEX_CTL2(0):
		case CPT_AF_EXEX_UCODE_BASE(0):
			if (offset & 7)
				return false;
			break;
		default:
			return false;
		}
		return true;
	}
	return false;
}

int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
					struct cpt_rd_wr_reg_msg *req,
					struct cpt_rd_wr_reg_msg *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	rsp->reg_offset = req->reg_offset;
	rsp->ret_val = req->ret_val;
	rsp->is_write = req->is_write;

	if (!is_valid_offset(rvu, req))
		return CPT_AF_ERR_ACCESS_DENIED;

	if (req->is_write)
		rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
	else
		rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);

	return 0;
}

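/* Snapshot the context-cache and RXC performance/status registers into the
 * CPT_STS response. These reads are skipped on OcteonTx2 (pre-CN10K) silicon.
 */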
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	if (is_rvu_otx2(rvu))
		return;

	rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
	rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
	rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
	rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_AOP_LATENCY_PC);
	rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
	rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_IFETCH_LATENCY_PC);
	rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
	rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_FFETCH_LATENCY_PC);
	rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
	rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_CTX_FFETCH_LATENCY_PC);
	rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
	rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_FFETCH_LATENCY_PC);
	rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
	rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
	rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);

	rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
	rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
	rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
	rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
	rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
	rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
	rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}

static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	u16 max_ses, max_ies, max_aes;
	u32 e_min = 0, e_max = 0;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Get AE status */
	e_min = max_ses + max_ies;
	e_max = max_ses + max_ies + max_aes;
	cpt_get_eng_sts(e_min, e_max, rsp, ae);
	/* Get SE status */
	e_min = 0;
	e_max = max_ses;
	cpt_get_eng_sts(e_min, e_max, rsp, se);
	/* Get IE status */
	e_min = max_ses;
	e_max = max_ses + max_ies;
	cpt_get_eng_sts(e_min, e_max, rsp, ie);
}

int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
			     struct cpt_sts_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	get_ctx_pc(rvu, rsp, blkaddr);

	/* Get CPT engines status */
	get_eng_sts(rvu, rsp, blkaddr);

	/* Read CPT instruction PC registers */
	rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_ACTIVE_CYCLES_PC);
	rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);

	return 0;
}

#define RXC_ZOMBIE_THRES  GENMASK_ULL(59, 48)
#define RXC_ZOMBIE_LIMIT  GENMASK_ULL(43, 32)
#define RXC_ACTIVE_THRES  GENMASK_ULL(27, 16)
#define RXC_ACTIVE_LIMIT  GENMASK_ULL(11, 0)
#define RXC_ACTIVE_COUNT  GENMASK_ULL(60, 48)
#define RXC_ZOMBIE_COUNT  GENMASK_ULL(60, 48)

static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
			     int blkaddr)
{
	u64 dfrg_reg;

	dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);

	rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
	rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
}

int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
				      struct cpt_rxc_time_cfg_req *req,
				      struct msg_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	cpt_rxc_time_cfg(rvu, req, blkaddr);

	return 0;
}

int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}

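/* Drain the RXC unit on CN10K: program minimal time limits and thresholds so
 * queued entries age out quickly, then poll the active and zombie counts
 * until they reach zero or the loop limit expires.
 */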
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
	struct cpt_rxc_time_cfg_req req;
	int timeout = 2000;
	u64 reg;

	if (is_rvu_otx2(rvu))
		return;

	/* Set time limit to minimum values, so that rxc entries will be
	 * flushed out quickly.
	 */
	req.step = 1;
	req.zombie_thres = 1;
	req.zombie_limit = 1;
	req.active_thres = 1;
	req.active_limit = 1;

	cpt_rxc_time_cfg(rvu, &req, blkaddr);

	do {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
		udelay(1);
		if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");

	timeout = 2000;
	do {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
		udelay(1);
		if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
}

#define INPROG_INFLIGHT(reg)    ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg)         (((reg) >> 32) & 0xFF)
#define INPROG_GWB(reg)         (((reg) >> 40) & 0xFF)

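/* Quiesce a CPT LF's instruction queue: block new enqueues via CPT_LF_CTL,
 * clear the execution-enable bit in CPT_LF_INPROG, then poll until the
 * queue's nq/dq pointers match and no in-flight or partially grabbed
 * instructions remain, bounded by a hard loop counter.
 */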
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
	int i = 0, hard_lp_ctr = 100000;
	u64 inprog, grp_ptr;
	u16 nq_ptr, dq_ptr;

	/* Disable instructions enqueuing */
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);

	/* Disable executions in the LF's queue */
	inprog = rvu_read64(rvu, blkaddr,
			    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
	inprog &= ~BIT_ULL(16);
	rvu_write64(rvu, blkaddr,
		    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);

	/* Wait for CPT queue to become execution-quiescent */
	do {
		inprog = rvu_read64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
		if (INPROG_GRB_PARTIAL(inprog)) {
			i = 0;
			hard_lp_ctr--;
		} else {
			i++;
		}

		grp_ptr = rvu_read64(rvu, blkaddr,
				     CPT_AF_BAR2_ALIASX(slot,
							CPT_LF_Q_GRP_PTR));
		nq_ptr = (grp_ptr >> 32) & 0x7FFF;
		dq_ptr = grp_ptr & 0x7FFF;

	} while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));

	if (hard_lp_ctr == 0)
		dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");

	i = 0;
	hard_lp_ctr = 100000;
	do {
		inprog = rvu_read64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));

		if ((INPROG_INFLIGHT(inprog) == 0) &&
		    (INPROG_GWB(inprog) < 40) &&
		    ((INPROG_GRB(inprog) == 0) ||
		     (INPROG_GRB(inprog) == 40))) {
			i++;
		} else {
			i = 0;
			hard_lp_ctr--;
		}
	} while (hard_lp_ctr && (i < 10));

	if (hard_lp_ctr == 0)
		dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}

int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
	u64 reg;

	if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
		cpt_rxc_teardown(rvu, blkaddr);

	/* Enable BAR2 ALIAS for this pcifunc. */
	reg = BIT_ULL(16) | pcifunc;
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	cpt_lf_disable_iqueue(rvu, blkaddr, slot);

	/* Set group drop to help clear out hardware */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
	reg |= BIT_ULL(17);
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);

	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);

	return 0;
}

#define CPT_RES_LEN    16
#define CPT_SE_IE_EGRP 1ULL

static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
				      int nix_blkaddr)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);
	struct cpt_inst_lmtst_req *req;
	dma_addr_t res_daddr;
	int timeout = 3000;
	u8 cpt_idx;
	u64 *inst;
	u16 *res;
	int rc;

	res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(rvu->dev, res_daddr)) {
		dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
		rc = -EFAULT;
		goto res_free;
	}
	*res = 0xFFFF;

	/* Send mbox message to CPT PF */
	req = (struct cpt_inst_lmtst_req *)
	       otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
				       cpt_pf_num, sizeof(*req),
				       sizeof(struct msg_rsp));
	if (!req) {
		rc = -ENOMEM;
		goto res_daddr_unmap;
	}
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.id = MBOX_MSG_CPT_INST_LMTST;

	inst = req->inst;
	/* Prepare CPT_INST_S */
	inst[0] = 0;
	inst[1] = res_daddr;
	/* AF PF FUNC */
	inst[2] = 0;
	/* Set QORD */
	inst[3] = 1;
	inst[4] = 0;
	inst[5] = 0;
	inst[6] = 0;
	/* Set EGRP */
	inst[7] = CPT_SE_IE_EGRP << 61;

	/* Subtract 1 from the NIX-CPT credit count to preserve
	 * credit counts.
	 */
	cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
		    BIT_ULL(22) - 1);

	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
	rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
	if (rc)
		dev_warn(rvu->dev, "notification to pf %d failed\n",
			 cpt_pf_num);
	/* Wait for CPT instruction to be completed */
	do {
		mdelay(1);
		if (*res == 0xFFFF)
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");

res_daddr_unmap:
	dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
res_free:
	kfree(res);

	return 0;
}

#define CTX_CAM_PF_FUNC   GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR      GENMASK_ULL(45, 0)

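/* Flush cached CPT contexts for a NIX PF_FUNC: submit a tracking CPT
 * instruction so in-flight inline-inbound packets are flushed through, drain
 * the RXC unit, then walk CPT_AF_CTX_CAM_DATA and issue CPT_LF_CTX_FLUSH for
 * every cached context pointer owned by this pcifunc.
 */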
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
{
	int nix_blkaddr, blkaddr;
	u16 max_ctx_entries, i;
	int slot = 0, num_lfs;
	u64 reg, cam_data;
	int rc;

	nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (nix_blkaddr < 0)
		return -EINVAL;

	if (is_rvu_otx2(rvu))
		return 0;

	blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;

	/* Submit CPT_INST_S to track when all packets have been
	 * flushed through for the NIX PF FUNC in inline inbound case.
	 */
	rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
	if (rc)
		return rc;

	/* Wait for rxc entries to be flushed out */
	cpt_rxc_teardown(rvu, blkaddr);

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	max_ctx_entries = (reg >> 48) & 0xFFF;

	mutex_lock(&rvu->rsrc_lock);

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					blkaddr);
	if (num_lfs == 0) {
		dev_warn(rvu->dev, "CPT LF is not configured\n");
		goto unlock;
	}

	/* Enable BAR2 ALIAS for this pcifunc. */
	reg = BIT_ULL(16) | pcifunc;
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	for (i = 0; i < max_ctx_entries; i++) {
		cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));

		if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
		    FIELD_GET(CTX_CAM_CPTR, cam_data)) {
			reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
			rvu_write64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
				    reg);
		}
	}
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);

unlock:
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}