// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
#include "rvu_npc_hash.h"

#define RSVD_MCAM_ENTRIES_PER_PF	3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */

#define NPC_PARSE_RESULT_DMAC_OFFSET	8
#define NPC_HW_TSTAMP_OFFSET		8ULL
#define NPC_KEX_CHAN_MASK		0xFFFULL
#define NPC_KEX_PF_FUNC_MASK		0xFFFFULL

#define ALIGN_8B_CEIL(__a)	(((__a) + 7) & (-8))

static const char def_pfl_name[] = "default";

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc);

bool is_npc_intf_tx(u8 intf)
{
	return !!(intf & 0x1);
}

bool is_npc_intf_rx(u8 intf)
{
	return !(intf & 0x1);
}

bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
{
	struct rvu_hwinfo *hw = rvu->hw;

	return intf < hw->npc_intfs;
}

int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
{
	/* Due to a HW issue in these silicon versions, parse nibble enable
	 * configuration has to be identical for both Rx and Tx interfaces.
	 */
	if (is_rvu_96xx_B0(rvu))
		return nibble_ena;
	return 0;
}

static int npc_mcam_verify_pf_func(struct rvu *rvu,
				   struct mcam_entry *entry_data, u8 intf,
				   u16 pcifunc)
{
	u16 pf_func, pf_func_mask;

	if (is_npc_intf_rx(intf))
		return 0;

	pf_func_mask = (entry_data->kw_mask[0] >> 32) &
		       NPC_KEX_PF_FUNC_MASK;
	pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;

	pf_func = be16_to_cpu((__force __be16)pf_func);
	if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
	    ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
	     (pcifunc & ~RVU_PFVF_FUNC_MASK)))
		return -EINVAL;

	return 0;
}

void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
	int blkaddr;
	u64 val = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Config CPI base for the PKIND */
	val = pkind | 1ULL << 62;
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
}

int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	u32 map;
	int i;

	for (i = 0; i < pkind->rsrc.max; i++) {
		map = pkind->pfchan_map[i];
		if (((map >> 16) & 0x3F) == pf)
			return i;
	}
	return -1;
}

#define NPC_AF_ACTION0_PTR_ADVANCE	GENMASK_ULL(27, 20)

int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{
	int pkind, blkaddr;
	u64 val;

	pkind = rvu_npc_get_pkind(rvu, pf);
	if (pkind < 0) {
		dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
		return -EINVAL;
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
	val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
	/* If timestamp is enabled then configure NPC to shift 8 bytes */
	if (enable)
		val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
				  NPC_HW_TSTAMP_OFFSET);
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);

	return 0;
}

static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
				    int nixlf)
{
	struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
	struct rvu *rvu = hw->rvu;
	int blkaddr = 0, max = 0;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Given a PF/VF and NIX LF number calculate the unicast mcam
	 * entry index based on the NIX block assigned to the PF/VF.
	 */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		if (pfvf->nix_blkaddr == blkaddr)
			break;
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
}

int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
			     u16 pcifunc, int nixlf, int type)
{
	int pf = rvu_get_pf(pcifunc);
	int index;

	/* Check if this is for a PF */
	if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Reserved entries exclude PF0 */
		pf--;
		index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
		/* Broadcast address matching entry should be first so
		 * that the packet can be replicated to all VFs.
		 */
		if (type == NIXLF_BCAST_ENTRY)
			return index;
		else if (type == NIXLF_ALLMULTI_ENTRY)
			return index + 1;
		else if (type == NIXLF_PROMISC_ENTRY)
			return index + 2;
	}

	return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}
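
/* Illustrative sketch (not driver logic): with the helpers above, the
 * reserved region of the MCAM is laid out as follows, for hypothetical
 * counts of N NIX LFs and P PFs (PF0 excluded):
 *
 *   nixlf_offset + 0 .. N - 1       one ucast entry per NIX LF
 *   pf_offset + (pf - 1) * 3 + 0    broadcast entry for 'pf'
 *   pf_offset + (pf - 1) * 3 + 1    allmulti entry for 'pf'
 *   pf_offset + (pf - 1) * 3 + 2    promisc entry for 'pf'
 */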

int npc_get_bank(struct npc_mcam *mcam, int index)
{
	int bank = index / mcam->banksize;

	/* 0,1 & 2,3 banks are combined for this keysize */
	if (mcam->keysize == NPC_MCAM_KEY_X2)
		return bank ? 2 : 0;

	return bank;
}

bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	u64 cfg;

	index &= (mcam->banksize - 1);
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
	return (cfg & 1);
}

void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
			    enable ? 1 : 0);
	}
}

static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				 int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
	}
}

static void npc_get_keyword(struct mcam_entry *entry, int idx,
			    u64 *cam0, u64 *cam1)
{
	u64 kw_mask = 0x00;

#define CAM_MASK(n)	(BIT_ULL(n) - 1)

	/* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
	 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
	 *
	 * Also, only 48 bits of BANKX_CAMX_W1 are valid.
	 */
	switch (idx) {
	case 0:
		/* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
		*cam1 = entry->kw[0];
		kw_mask = entry->kw_mask[0];
		break;
	case 1:
		/* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
		*cam1 = entry->kw[1] & CAM_MASK(48);
		kw_mask = entry->kw_mask[1] & CAM_MASK(48);
		break;
	case 2:
		/* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
		 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
		 */
		*cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
		kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
		break;
	case 3:
		/* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
		 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
		 */
		*cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
		kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
		break;
	case 4:
		/* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
		 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
		 */
		*cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
		kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
		break;
	case 5:
		/* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
		 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
		 */
		*cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
		kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
		break;
	case 6:
		/* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
		 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
		 */
		*cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
		*cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
		kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
		kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
		break;
	case 7:
		/* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
		*cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
		kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
		break;
	}

	*cam1 &= kw_mask;
	*cam0 = ~*cam1 & kw_mask;
}
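
/* Worked example (illustrative, not driver logic): for a key word with
 * kw = 0b1010 and kw_mask = 0b0011, the code above computes
 *
 *   cam1 = kw & kw_mask  = 0b0010   (match '1' in masked bits)
 *   cam0 = ~kw & kw_mask = 0b0001   (match '0' in masked bits)
 *
 * Bits where kw_mask is 0 end up with CAM0 = CAM1 = 0, i.e. don't care,
 * while masked bits must match the exact value taken from kw.
 */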

static void npc_fill_entryword(struct mcam_entry *entry, int idx,
			       u64 cam0, u64 cam1)
{
	/* Similar to npc_get_keyword, but fills mcam_entry structure from
	 * CAM registers.
	 */
	switch (idx) {
	case 0:
		entry->kw[0] = cam1;
		entry->kw_mask[0] = cam1 ^ cam0;
		break;
	case 1:
		entry->kw[1] = cam1;
		entry->kw_mask[1] = cam1 ^ cam0;
		break;
	case 2:
		entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
		entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
		break;
	case 3:
		entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
		entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
		break;
	case 4:
		entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
		entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
		break;
	case 5:
		entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
		entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
		break;
	case 6:
		entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
		entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
		break;
	case 7:
		entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		break;
	}
}
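
/* Note (illustrative): npc_fill_entryword() inverts the encoding used by
 * npc_get_keyword(). Since cam1 = kw & kw_mask and cam0 = ~kw & kw_mask,
 * the mask is recovered as kw_mask = cam1 ^ cam0 (a bit is set in exactly
 * one of the two CAM words iff it was masked), and kw = cam1 within the
 * masked bits. Reusing the example above: cam1 = 0b0010 and cam0 = 0b0001
 * give back kw_mask = 0b0011 and kw = 0b0010.
 */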

static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
					int blkaddr, u16 pf_func)
{
	int bank, nixlf, index;

	/* get ucast entry rule entry index */
	nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
	index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
					 NIXLF_UCAST_ENTRY);
	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
			      int blkaddr, int index, struct mcam_entry *entry,
			      bool *enable)
{
	struct rvu_npc_mcam_rule *rule;
	u16 owner, target_func;
	struct rvu_pfvf *pfvf;
	u64 rx_action;

	owner = mcam->entry2pfvf_map[index];
	target_func = (entry->action >> 4) & 0xffff;
	/* do nothing when target is LBK/PF or owner is not PF */
	if (is_pffunc_af(owner) || is_afvf(target_func) ||
	    (owner & RVU_PFVF_FUNC_MASK) ||
	    !(target_func & RVU_PFVF_FUNC_MASK))
		return;

	/* save entry2target_pffunc */
	pfvf = rvu_get_pfvf(rvu, target_func);
	mcam->entry2target_pffunc[index] = target_func;

	/* don't enable rule when nixlf not attached or initialized */
	if (!(is_nixlf_attached(rvu, target_func) &&
	      test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
		*enable = false;

	/* fix up not needed for the rules added by user (ntuple filters) */
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (rule->entry == index)
			return;
	}

	/* copy VF default entry action to the VF mcam entry */
	rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
						 target_func);
	if (rx_action)
		entry->action = rx_action;
}

static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index, u8 intf,
				  struct mcam_entry *entry, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int kw = 0, actbank, actindex;
	u8 tx_intf_mask = ~intf & 0x3;
	u8 tx_intf = intf;
	u64 cam0, cam1;

	actbank = bank; /* Save bank id, to set action later on */
	actindex = index;
	index &= (mcam->banksize - 1);

	/* Disable before mcam entry update */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);

	/* Clear mcam entry to avoid writes being suppressed by NPC */
	npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);

	/* CAM1 takes the comparison value and
	 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
	 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
	 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
	 * CAM1<n> = 0 & CAM0<n> = 0 => always match, i.e. don't care.
	 */
	for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
		/* Interface should be set in all banks */
		if (is_npc_intf_tx(intf)) {
			/* Last bit must be set and rest don't care
			 * for TX interfaces
			 */
			tx_intf_mask = 0x1;
			tx_intf = intf & tx_intf_mask;
			tx_intf_mask = ~tx_intf & tx_intf_mask;
		}

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
			    tx_intf);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
			    tx_intf_mask);

		/* Set the match key */
		npc_get_keyword(entry, kw, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);

		npc_get_keyword(entry, kw + 1, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
	}

	/* PF installing VF rule */
	if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
		npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);

	/* Set 'action' */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);

	/* Set TAG 'action' */
	rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
		    entry->vtag_action);

	/* Enable the entry */
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
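
/* Worked example (illustrative): for a TX interface (intf = 0x1) the loop
 * in npc_config_mcam_entry() above writes CAM1 = 0x1 and CAM0 = 0x0 for
 * the interface bits, i.e. bit 0 must be '1' and bit 1 is don't care.
 * For an RX interface the full 2-bit value is matched exactly:
 * CAM1 = intf and CAM0 = ~intf & 0x3.
 */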

void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, u16 src,
			 struct mcam_entry *entry, u8 *intf, u8 *ena)
{
	int sbank = npc_get_bank(mcam, src);
	int bank, kw = 0;
	u64 cam0, cam1;

	src &= (mcam->banksize - 1);
	bank = sbank;

	for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
		npc_fill_entryword(entry, kw, cam0, cam1);

		cam1 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
		cam0 = rvu_read64(rvu, blkaddr,
				  NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
		npc_fill_entryword(entry, kw + 1, cam0, cam1);
	}

	entry->action = rvu_read64(rvu, blkaddr,
				   NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
	entry->vtag_action =
		rvu_read64(rvu, blkaddr,
			   NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
	*intf = rvu_read64(rvu, blkaddr,
			   NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
	*ena = rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
}

static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				int blkaddr, u16 src, u16 dest)
{
	int dbank = npc_get_bank(mcam, dest);
	int sbank = npc_get_bank(mcam, src);
	u64 cfg, sreg, dreg;
	int bank, i;

	src &= (mcam->banksize - 1);
	dest &= (mcam->banksize - 1);

	/* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
		sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
		dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
		for (i = 0; i < 6; i++) {
			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
		}
	}

	/* Copy action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);

	/* Copy TAG action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);

	/* Enable or disable */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}

static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			       int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);

	index &= (mcam->banksize - 1);
	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_rx_action action = { 0 };
	int blkaddr, index;

	/* AF's and SDP VFs work in promiscuous mode */
	if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast rule should not be installed if DMAC
	 * extraction is not supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);

	/* Don't change the action if the entry is already enabled.
	 * Otherwise the RSS action may get overwritten.
	 */
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, index);
	} else {
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	req.default_rule = 1;
	ether_addr_copy(req.packet.dmac, mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = chan;
	req.chan_mask = 0xFFFU;
	req.intf = pfvf->nix_rx_intf;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = action.pf_func;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = action.flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, u8 chan_cnt)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, ucast_idx, index;
	struct nix_rx_action action = { 0 };
	u64 relaxed_mask;

	if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	if (is_cgx_vf(rvu, pcifunc))
		index = npc_get_nixlf_mcam_index(mcam,
						 pcifunc & ~RVU_PFVF_FUNC_MASK,
						 nixlf, NIXLF_PROMISC_ENTRY);

	/* If the corresponding PF's ucast action is RSS,
	 * use the same action for promisc also
	 */
	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
					     nixlf, NIXLF_UCAST_ENTRY);
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, ucast_idx);

	if (action.op != NIX_RX_ACTIONOP_RSS) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_UCAST;
	}

	/* RX_ACTION set to MCAST for CGX PF's */
	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
	    is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_MCAST;
		pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
		action.index = pfvf->promisc_mce_idx;
	}

	/* For cn10k the upper two bits of the channel number are the CPT
	 * channel number. By masking out these bits in the MCAM entry,
	 * the same entry used for NIX will also allow packets received
	 * from CPT to be parsed.
	 */
	if (!is_rvu_otx2(rvu))
		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
	else
		req.chan_mask = 0xFFFU;

	if (chan_cnt > 1) {
		if (!is_power_of_2(chan_cnt)) {
			dev_err(rvu->dev,
				"%s: channel count more than 1, must be power of 2\n", __func__);
			return;
		}
		relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
					   ilog2(chan_cnt));
		req.chan_mask &= relaxed_mask;
	}

	req.channel = chan;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = action.flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
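
/* Worked example (illustrative): with chan_cnt = 4 the code above uses
 * relaxed_mask = GENMASK_ULL(63, ilog2(4)) = ~0x3ULL, so
 * req.chan_mask = 0xFFF & ~0x3 = 0xFFC and one MCAM entry matches the
 * whole aligned block of 4 channels starting at 'chan'.
 */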

void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan)
{
	struct rvu_pfvf *pfvf;
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Skip LBK VFs */
	if (is_afvf(pcifunc))
		return;

	/* If pkt replication is not supported,
	 * then only PF is allowed to add a bcast match entry.
	 */
	if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Bcast rule should not be installed if neither DMAC
	 * nor LXMB extraction is supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_BCAST_ENTRY);

	if (!hw->cap.nix_rx_multicast) {
		/* Early silicon doesn't support pkt replication,
		 * so install entry with UCAST action, so that PF
		 * receives all broadcast packets.
		 */
		req.op = NIX_RX_ACTIONOP_UCAST;
	} else {
		req.op = NIX_RX_ACTIONOP_MCAST;
		req.index = pfvf->bcast_mce_idx;
	}

	eth_broadcast_addr((u8 *)&req.packet.dmac);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = chan;
	req.chan_mask = 0xFFFU;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_BCAST_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				    u64 chan)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, ucast_idx, index;
	u8 mac_addr[ETH_ALEN] = { 0 };
	struct nix_rx_action action = { 0 };
	struct rvu_pfvf *pfvf;
	u16 vf_func;

	/* Only CGX PF/VF can add an allmulticast entry */
	if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Mcast rule should not be installed if neither DMAC
	 * nor LXMB extraction is supported by the profile.
	 */
	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_ALLMULTI_ENTRY);

	/* If the corresponding PF's ucast action is RSS,
	 * use the same action for multicast entry also
	 */
	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
					     nixlf, NIXLF_UCAST_ENTRY);
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, ucast_idx);

	if (action.op != NIX_RX_ACTIONOP_RSS) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	/* RX_ACTION set to MCAST for CGX PF's */
	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
		*(u64 *)&action = 0;
		action.op = NIX_RX_ACTIONOP_MCAST;
		action.index = pfvf->mcast_mce_idx;
	}

	mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
	ether_addr_copy(req.packet.dmac, mac_addr);
	ether_addr_copy(req.mask.dmac, mac_addr);
	req.features = BIT_ULL(NPC_DMAC);

	/* For cn10k the upper two bits of the channel number are the CPT
	 * channel number. By masking out these bits in the MCAM entry,
	 * the same entry used for NIX will also allow packets received
	 * from CPT to be parsed.
	 */
	if (!is_rvu_otx2(rvu))
		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
	else
		req.chan_mask = 0xFFFU;

	req.channel = chan;
	req.intf = pfvf->nix_rx_intf;
	req.entry = index;
	req.op = action.op;
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc | vf_func;
	req.index = action.index;
	req.match_id = action.match_id;
	req.flow_key_alg = action.flow_key_alg;

	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
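
/* Note (illustrative): the allmulti entry above matches only the
 * multicast group bit of the DMAC, since packet.dmac and mask.dmac are
 * both 01:00:00:00:00:00. Any DMAC with the I/G bit set (multicast and
 * broadcast frames) can hit this entry, while unicast DMACs cannot.
 */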

void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				   bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_ALLMULTI_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
				     int blkaddr, u16 pcifunc, u64 rx_action)
{
	int actindex, index, bank, entry;
	struct rvu_npc_mcam_rule *rule;
	bool enable, update;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return;

	mutex_lock(&mcam->lock);
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == pcifunc) {
			update = true;
			/* update not needed for the rules added via ntuple filters */
			list_for_each_entry(rule, &mcam->mcam_rules, list) {
				if (rule->entry == index)
					update = false;
			}
			if (!update)
				continue;
			bank = npc_get_bank(mcam, index);
			actindex = index;
			entry = index & (mcam->banksize - 1);

			/* read vf flow entry enable status */
			enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
						       actindex);
			/* disable before mcam entry update */
			npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
					      false);
			/* update 'action' */
			rvu_write64(rvu, blkaddr,
				    NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
				    rx_action);
			if (enable)
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      actindex, true);
		}
	}
	mutex_unlock(&mcam->lock);
}

void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_rx_action action;
	int blkaddr, index, bank;
	struct rvu_pfvf *pfvf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Check if this is for reserved default entry */
	if (mcam_index < 0) {
		if (group != DEFAULT_RSS_CONTEXT_GROUP)
			return;
		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
						 nixlf, NIXLF_UCAST_ENTRY);
	} else {
		/* TODO: validate this mcam index */
		index = mcam_index;
	}

	if (index >= mcam->total_entries)
		return;

	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	*(u64 *)&action = rvu_read64(rvu, blkaddr,
				     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
	/* Ignore if no action was set earlier */
	if (!*(u64 *)&action)
		return;

	action.op = NIX_RX_ACTIONOP_RSS;
	action.pf_func = pcifunc;
	action.index = group;
	action.flow_key_alg = alg_idx;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);

	/* update the VF flow rule action with the VF default entry action */
	if (mcam_index < 0)
		npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
					 *(u64 *)&action);

	/* update the action change in default rule */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (pfvf->def_ucast_rule)
		pfvf->def_ucast_rule->rx_action = action;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	/* If the PF's promiscuous entry is enabled,
	 * set the RSS action for that entry as well
	 */
	if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
	    is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
		bank = npc_get_bank(mcam, index);
		index &= (mcam->banksize - 1);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
			    *(u64 *)&action);
	}
}

void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, int type, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_mce_list *mce_list;
	int index, blkaddr, mce_idx;
	struct rvu_pfvf *pfvf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
					 nixlf, type);

	/* disable MCAM entry when packet replication is not supported by hw */
	if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
		return;
	}

	/* Return in case the MCE list is not enabled */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
	    type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
		return;

	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);

	nix_update_mce_list(rvu, pcifunc, mce_list,
			    mce_idx, index, enable);
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
				       int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast MCAM match entry of this PF/VF */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);

	/* Nothing to do for VFs, on platforms where pkt replication
	 * is not supported
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
		return;

	/* add/delete pf_func to broadcast MCE list */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_BCAST_ENTRY, enable);
}

void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	if (nixlf < 0)
		return;

	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);

	/* Delete multicast and promisc MCAM entries */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_ALLMULTI_ENTRY, false);
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_PROMISC_ENTRY, false);
}

bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;

	mutex_lock(&mcam->lock);

	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (rule->intf != intf)
			continue;

		if (rule->entry != entry)
			continue;

		rule->enable = enable;
		mutex_unlock(&mcam->lock);

		npc_enable_mcam_entry(rvu, mcam, blkaddr,
				      entry, enable);

		return true;
	}

	mutex_unlock(&mcam->lock);
	return false;
}

void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	if (nixlf < 0)
		return;

	/* Enables only broadcast match entry. Promisc/Allmulti are enabled
	 * in set_rx_mode mbox handler.
	 */
	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}

void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Disable MCAM entries directing traffic to this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == pcifunc &&
		    rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, false);
			rule->enable = false;
			/* Indicate that default rule is disabled */
			if (rule->default_rule) {
				pfvf->def_ucast_rule = NULL;
				list_del(&rule->list);
				kfree(rule);
			}
		}
	}

	mutex_unlock(&mcam->lock);

	npc_mcam_disable_flows(rvu, pcifunc);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Free all MCAM entries owned by this 'pcifunc' */
	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);

	/* Free all MCAM counters owned by this 'pcifunc' */
	npc_mcam_free_all_counters(rvu, mcam, pcifunc);

	/* Delete MCAM entries owned by this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (rule->owner == pcifunc && !rule->default_rule) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	mutex_unlock(&mcam->lock);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_tx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_RX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_RX]
					[ld][fl]);
	}
}

static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_rx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_TX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_TX]
					[ld][fl]);
	}
}

static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
				     struct npc_mcam_kex *mkex)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u8 intf;
	int ld;

	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
			    mkex->kex_ld_flags[ld]);

	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
		npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
	}

	/* Program mkex hash profile */
	npc_program_mkex_hash(rvu, blkaddr);
}

static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
				 u64 *size)
{
	u64 prfl_addr, prfl_sz;

	if (!rvu->fwdata)
		return -EINVAL;

	prfl_addr = rvu->fwdata->mcam_addr;
	prfl_sz = rvu->fwdata->mcam_sz;

	if (!prfl_addr || !prfl_sz)
		return -EINVAL;

	*prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
	if (!(*prfl_img_addr))
		return -ENOMEM;

	*size = prfl_sz;

	return 0;
}

/* strtoull of "mkexprof" with base:36 */
#define MKEX_END_SIGN	0xdeadbeef

static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
				  const char *mkex_profile)
{
	struct device *dev = &rvu->pdev->dev;
	struct npc_mcam_kex *mcam_kex;
	void __iomem *mkex_prfl_addr = NULL;
	u64 prfl_sz;
	int ret;

	/* If the user hasn't selected an mkex profile */
	if (rvu->kpu_fwdata_sz ||
	    !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
		goto program_mkex;

	/* Setting up the mapping for mkex profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto program_mkex;

	mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;

	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
		/* Compare with mkex mod_param name string */
		if (mcam_kex->mkex_sign == MKEX_SIGN &&
		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
			/* Due to an errata (35786) in A0/B0 pass silicon,
			 * parse nibble enable configuration has to be
			 * identical for both Rx and Tx interfaces.
			 */
			if (!is_rvu_96xx_B0(rvu) ||
			    mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
				rvu->kpu.mkex = mcam_kex;
			goto program_mkex;
		}

		mcam_kex++;
		prfl_sz -= sizeof(struct npc_mcam_kex);
	}
	dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);

program_mkex:
	dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
	/* Program selected mkex profile */
	npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
	if (mkex_prfl_addr)
		iounmap(mkex_prfl_addr);
}

static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
				 const struct npc_kpu_profile_action *kpuaction,
				 int kpu, int entry, bool pkind)
{
	struct npc_kpu_action0 action0 = {0};
	struct npc_kpu_action1 action1 = {0};
	u64 reg;

	action1.errlev = kpuaction->errlev;
	action1.errcode = kpuaction->errcode;
	action1.dp0_offset = kpuaction->dp0_offset;
	action1.dp1_offset = kpuaction->dp1_offset;
	action1.dp2_offset = kpuaction->dp2_offset;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION1(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

	action0.byp_count = kpuaction->bypass_count;
	action0.capture_ena = kpuaction->cap_ena;
	action0.parse_done = kpuaction->parse_done;
	action0.next_state = kpuaction->next_state;
	action0.capture_lid = kpuaction->lid;
	action0.capture_ltype = kpuaction->ltype;
	action0.capture_flags = kpuaction->flags;
	action0.ptr_advance = kpuaction->ptr_advance;
	action0.var_len_offset = kpuaction->offset;
	action0.var_len_mask = kpuaction->mask;
	action0.var_len_right = kpuaction->right;
	action0.var_len_shift = kpuaction->shift;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION0(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}

static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
			      const struct npc_kpu_profile_cam *kpucam,
			      int kpu, int entry)
{
	struct npc_kpu_cam cam0 = {0};
	struct npc_kpu_cam cam1 = {0};

	cam1.state = kpucam->state & kpucam->state_mask;
	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

	cam0.state = ~kpucam->state & kpucam->state_mask;
	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
}

static inline u64 enable_mask(int count)
{
	return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
}
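
/* Worked example (illustrative): enable_mask() computes the value for the
 * NPC_AF_KPUX_ENTRY_DISX *disable* registers, so set bits disable entries.
 * enable_mask(3) = ~(BIT_ULL(3) - 1) = ~0x7ULL leaves entries 0-2 enabled
 * and disables entries 3-63; for count >= 64 it returns 0, disabling none
 * of the 64 entries covered by that register.
 */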

static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
				    const struct npc_kpu_profile *profile)
{
	int entry, num_entries, max_entries;
	u64 entry_mask;

	if (profile->cam_entries != profile->action_entries) {
		dev_err(rvu->dev,
			"KPU%d: CAM and action entries [%d != %d] not equal\n",
			kpu, profile->cam_entries, profile->action_entries);
	}

	max_entries = rvu->hw->npc_kpu_entries;

	/* Program CAM match entries for previous KPU extracted data */
	num_entries = min_t(int, profile->cam_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpucam(rvu, blkaddr,
				  &profile->cam[entry], kpu, entry);

	/* Program this KPU's actions */
	num_entries = min_t(int, profile->action_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
				     kpu, entry, false);

	/* Enable all programmed entries */
	num_entries = min_t(int, profile->action_entries, profile->cam_entries);
	entry_mask = enable_mask(num_entries);
	/* Disable first KPU_MAX_CST_ENT entries for built-in profile */
	if (!rvu->kpu.custom)
		entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
	if (num_entries > 64) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
			    enable_mask(num_entries - 64));
	}

	/* Enable this KPU */
	rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}

static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
	profile->custom = 0;
	profile->name = def_pfl_name;
	profile->version = NPC_KPU_PROFILE_VER;
	profile->ikpu = ikpu_action_entries;
	profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
	profile->kpu = npc_kpu_profiles;
	profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
	profile->lt_def = &npc_lt_defaults;
	profile->mkex = &npc_mkex_default;
	profile->mkex_hash = &npc_mkex_hash_default;

	return 0;
}

static int npc_apply_custom_kpu(struct rvu *rvu,
				struct npc_kpu_profile_adapter *profile)
{
	size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
	struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
	struct npc_kpu_profile_action *action;
	struct npc_kpu_profile_cam *cam;
	struct npc_kpu_fwdata *fw_kpu;
	int entries;
	u16 kpu, entry;

	if (rvu->kpu_fwdata_sz < hdr_sz) {
		dev_warn(rvu->dev, "Invalid KPU profile size\n");
		return -EINVAL;
	}
	if (le64_to_cpu(fw->signature) != KPU_SIGN) {
		dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
			 fw->signature);
		return -EINVAL;
	}
	/* Verify that we are using a known profile structure */
	if (NPC_KPU_VER_MAJ(profile->version) >
	    NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile is aligned with the required kernel changes */
	if (NPC_KPU_VER_MIN(profile->version) <
	    NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev,
			 "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MIN(profile->version),
			 NPC_KPU_VER_PATCH(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile fits the HW */
	if (fw->kpus > profile->kpus) {
		dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
			 profile->kpus);
		return -EINVAL;
	}

	profile->custom = 1;
	profile->name = fw->name;
	profile->version = le64_to_cpu(fw->version);
	profile->mkex = &fw->mkex;
	profile->lt_def = &fw->lt_def;

	for (kpu = 0; kpu < fw->kpus; kpu++) {
		fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
		if (fw_kpu->entries > KPU_MAX_CST_ENT)
			dev_warn(rvu->dev,
				 "Too many custom entries on KPU%d: %d > %d\n",
				 kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
		entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
		cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
		offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
		action = (struct npc_kpu_profile_action *)(fw->data + offset);
		offset += fw_kpu->entries * sizeof(*action);
		if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
			dev_warn(rvu->dev,
				 "Profile size mismatch on KPU%i parsing.\n",
				 kpu + 1);
			return -EINVAL;
		}
		for (entry = 0; entry < entries; entry++) {
			profile->kpu[kpu].cam[entry] = cam[entry];
			profile->kpu[kpu].action[entry] = action[entry];
		}
	}

	return 0;
}

static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
				 u64 prfl_sz, const char *kpu_profile)
{
	struct npc_kpu_profile_fwdata *kpu_data = NULL;
	int rc = -EINVAL;

	kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
	if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
	    !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kpu_data;
		rvu->kpu_fwdata_sz = prfl_sz;
		rvu->kpu_prfl_addr = prfl_addr;
		rc = 0;
	}

	return rc;
}
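
/* Note (illustrative): profile images inside the coalesced firmware blob
 * are 8-byte aligned, which is what ALIGN_8B_CEIL() below implements:
 * ALIGN_8B_CEIL(13) = (13 + 7) & ~7 = 16, while an already aligned
 * offset such as 16 is left unchanged.
 */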

static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
					 const char *kpu_profile)
{
	struct npc_coalesced_kpu_prfl *img_data = NULL;
	int i = 0, rc = -EINVAL;
	void __iomem *kpu_prfl_addr;
	u16 offset;

	img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
	if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
	    !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
		/* Loaded profile is a single KPU profile. */
		rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
					   prfl_sz, kpu_profile);
		goto done;
	}

	/* Loaded image is a coalesced image; compute the offset of the
	 * first KPU profile and scan each profile for a match.
	 */
	offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
		(img_data->num_prfl * sizeof(uint16_t));
	while (i < img_data->num_prfl) {
		/* Profile image offsets are rounded up to the next multiple of 8. */
		offset = ALIGN_8B_CEIL(offset);
		kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
						 offset);
		rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
					   img_data->prfl_sz[i], kpu_profile);
		if (!rc)
			break;
		/* Calculate offset of the next profile image from its size. */
		offset += img_data->prfl_sz[i];
		i++;
	}
done:
	return rc;
}

static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
{
	int ret = -EINVAL;
	u64 prfl_sz;

	/* Setting up the mapping for NPC profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto done;

	/* Detect if profile is coalesced or single KPU profile and load */
	ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
	if (ret == 0)
		goto done;

	/* Cleaning up if KPU profile image from fwdata is not valid. */
	if (rvu->kpu_prfl_addr) {
		iounmap(rvu->kpu_prfl_addr);
		rvu->kpu_prfl_addr = NULL;
		rvu->kpu_fwdata_sz = 0;
		rvu->kpu_fwdata = NULL;
	}

done:
	return ret;
}

static void npc_load_kpu_profile(struct rvu *rvu)
{
	struct npc_kpu_profile_adapter *profile = &rvu->kpu;
	const char *kpu_profile = rvu->kpu_pfl_name;
	const struct firmware *fw = NULL;
	bool retry_fwdb = false;

	/* If the user hasn't specified a profile customization */
	if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
		goto revert_to_default;
	/* First prepare default KPU, then we'll customize top entries. */
	npc_prepare_default_kpu(profile);

	/* Order of precedence for loading the NPC profile (high to low):
	 * Firmware binary in filesystem.
	 * Firmware database method.
	 * Default KPU profile.
	 */
	if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
		if (rvu->kpu_fwdata) {
			memcpy(rvu->kpu_fwdata, fw->data, fw->size);
			rvu->kpu_fwdata_sz = fw->size;
		}
		release_firmware(fw);
		retry_fwdb = true;
		goto program_kpu;
	}

load_image_fwdb:
	/* Loading the KPU profile using firmware database */
	if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
		goto revert_to_default;

program_kpu:
	/* Apply profile customization if firmware was loaded. */
	if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
		/* If the image from the firmware filesystem fails to load
		 * or is invalid, retry with the firmware database method.
		 */
		if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
			/* Loading image from firmware database failed. */
			if (rvu->kpu_prfl_addr) {
				iounmap(rvu->kpu_prfl_addr);
				rvu->kpu_prfl_addr = NULL;
			} else {
				kfree(rvu->kpu_fwdata);
			}
			rvu->kpu_fwdata = NULL;
			rvu->kpu_fwdata_sz = 0;
			if (retry_fwdb) {
				retry_fwdb = false;
				goto load_image_fwdb;
			}
		}

		dev_warn(rvu->dev,
			 "Can't load KPU profile %s. Using default.\n",
			 kpu_profile);
		kfree(rvu->kpu_fwdata);
		rvu->kpu_fwdata = NULL;
		goto revert_to_default;
	}

	dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
		 profile->name, NPC_KPU_VER_MAJ(profile->version),
		 NPC_KPU_VER_MIN(profile->version),
		 NPC_KPU_VER_PATCH(profile->version));

	return;

revert_to_default:
	npc_prepare_default_kpu(profile);
}

static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int num_pkinds, num_kpus, idx;

	/* Disable all KPUs and their entries */
	for (idx = 0; idx < hw->npc_kpus; idx++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
	}

	/* Load and customize KPU profile. */
	npc_load_kpu_profile(rvu);

	/* First program IKPU profile, i.e. PKIND configs.
	 * Check HW max count to avoid configuring junk or
	 * writing to unsupported CSR addresses.
	 */
	num_pkinds = rvu->kpu.pkinds;
	num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);

	for (idx = 0; idx < num_pkinds; idx++)
		npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);

	/* Program KPU CAM and Action profiles */
	num_kpus = rvu->kpu.kpus;
	num_kpus = min_t(int, hw->npc_kpus, num_kpus);

	for (idx = 0; idx < num_kpus; idx++)
		npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
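
/* Worked example (illustrative, hypothetical counts): on a system with
 * 64 NIX LFs and 16 PFs, npc_mcam_rsrcs_init() below reserves
 *
 *   rsvd = 64 * RSVD_MCAM_ENTRIES_PER_NIXLF +
 *          15 * RSVD_MCAM_ENTRIES_PER_PF = 64 + 45 = 109
 *
 * entries at the top of the MCAM; the remaining entries form the
 * allocatable bitmap (mcam->bmap_entries).
 */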
1829 */ 1830 rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + 1831 ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); 1832 if (mcam->total_entries <= rsvd) { 1833 dev_warn(rvu->dev, 1834 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", 1835 mcam->total_entries); 1836 return -ENOMEM; 1837 } 1838 1839 mcam->bmap_entries = mcam->total_entries - rsvd; 1840 mcam->nixlf_offset = mcam->bmap_entries; 1841 mcam->pf_offset = mcam->nixlf_offset + nixlf_count; 1842 1843 /* Allocate bitmaps for managing MCAM entries */ 1844 mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), 1845 sizeof(long), GFP_KERNEL); 1846 if (!mcam->bmap) 1847 return -ENOMEM; 1848 1849 mcam->bmap_reverse = devm_kcalloc(rvu->dev, 1850 BITS_TO_LONGS(mcam->bmap_entries), 1851 sizeof(long), GFP_KERNEL); 1852 if (!mcam->bmap_reverse) 1853 return -ENOMEM; 1854 1855 mcam->bmap_fcnt = mcam->bmap_entries; 1856 1857 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 1858 mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, 1859 sizeof(u16), GFP_KERNEL); 1860 if (!mcam->entry2pfvf_map) 1861 return -ENOMEM; 1862 1863 /* Reserve 1/8th of MCAM entries at the bottom for low priority 1864 * allocations and another 1/8th at the top for high priority 1865 * allocations. 1866 */ 1867 mcam->lprio_count = mcam->bmap_entries / 8; 1868 if (mcam->lprio_count > BITS_PER_LONG) 1869 mcam->lprio_count = round_down(mcam->lprio_count, 1870 BITS_PER_LONG); 1871 mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; 1872 mcam->hprio_count = mcam->lprio_count; 1873 mcam->hprio_end = mcam->hprio_count; 1874 1875 /* Allocate bitmap for managing MCAM counters and memory 1876 * for saving counter to RVU PFFUNC allocation mapping. 1877 */ 1878 err = rvu_alloc_bitmap(&mcam->counters); 1879 if (err) 1880 return err; 1881 1882 mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, 1883 sizeof(u16), GFP_KERNEL); 1884 if (!mcam->cntr2pfvf_map) 1885 goto free_mem; 1886 1887 /* Alloc memory for MCAM entry to counter mapping and for tracking 1888 * counter's reference count. 
1889 */ 1890 mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, 1891 sizeof(u16), GFP_KERNEL); 1892 if (!mcam->entry2cntr_map) 1893 goto free_mem; 1894 1895 mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, 1896 sizeof(u16), GFP_KERNEL); 1897 if (!mcam->cntr_refcnt) 1898 goto free_mem; 1899 1900 /* Alloc memory for saving target device of mcam rule */ 1901 mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries, 1902 sizeof(u16), GFP_KERNEL); 1903 if (!mcam->entry2target_pffunc) 1904 goto free_mem; 1905 1906 for (index = 0; index < mcam->bmap_entries; index++) { 1907 mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; 1908 mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; 1909 } 1910 1911 for (cntr = 0; cntr < mcam->counters.max; cntr++) 1912 mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; 1913 1914 mutex_init(&mcam->lock); 1915 1916 return 0; 1917 1918 free_mem: 1919 kfree(mcam->counters.bmap); 1920 return -ENOMEM; 1921 } 1922 1923 static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) 1924 { 1925 struct npc_pkind *pkind = &rvu->hw->pkind; 1926 struct npc_mcam *mcam = &rvu->hw->mcam; 1927 struct rvu_hwinfo *hw = rvu->hw; 1928 u64 npc_const, npc_const1; 1929 u64 npc_const2 = 0; 1930 1931 npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST); 1932 npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1); 1933 if (npc_const1 & BIT_ULL(63)) 1934 npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); 1935 1936 pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT; 1937 hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL; 1938 hw->npc_kpu_entries = npc_const1 & 0xFFFULL; 1939 hw->npc_kpus = (npc_const >> 8) & 0x1FULL; 1940 hw->npc_intfs = npc_const & 0xFULL; 1941 hw->npc_counters = (npc_const >> 48) & 0xFFFFULL; 1942 1943 mcam->banks = (npc_const >> 44) & 0xFULL; 1944 mcam->banksize = (npc_const >> 28) & 0xFFFFULL; 1945 hw->npc_stat_ena = BIT_ULL(9); 1946 /* Extended set */ 1947 if (npc_const2) { 1948 hw->npc_ext_set = true; 1949 /* 96xx supports only match_stats and npc_counters 1950 * reflected in NPC_AF_CONST reg. 1951 * STAT_SEL and ENA are at [0:8] and 9 bit positions. 1952 * 98xx has both match_stat and ext and npc_counter 1953 * reflected in NPC_AF_CONST2 1954 * STAT_SEL_EXT added at [12:14] bit position. 1955 * cn10k supports only ext and hence npc_counters in 1956 * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2. 1957 * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63 1958 */ 1959 if (!hw->npc_counters) 1960 hw->npc_stat_ena = BIT_ULL(63); 1961 hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; 1962 mcam->banksize = npc_const2 & 0xFFFFULL; 1963 } 1964 1965 mcam->counters.max = hw->npc_counters; 1966 } 1967 1968 static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) 1969 { 1970 struct npc_mcam_kex *mkex = rvu->kpu.mkex; 1971 struct npc_mcam *mcam = &rvu->hw->mcam; 1972 struct rvu_hwinfo *hw = rvu->hw; 1973 u64 nibble_ena, rx_kex, tx_kex; 1974 u8 intf; 1975 1976 /* Reserve last counter for MCAM RX miss action which is set to 1977 * drop packet. This way we will know how many pkts didn't match 1978 * any MCAM entry. 
1979 */ 1980 mcam->counters.max--; 1981 mcam->rx_miss_act_cntr = mcam->counters.max; 1982 1983 rx_kex = mkex->keyx_cfg[NIX_INTF_RX]; 1984 tx_kex = mkex->keyx_cfg[NIX_INTF_TX]; 1985 nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); 1986 1987 nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena); 1988 if (nibble_ena) { 1989 tx_kex &= ~NPC_PARSE_NIBBLE; 1990 tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); 1991 mkex->keyx_cfg[NIX_INTF_TX] = tx_kex; 1992 } 1993 1994 /* Configure RX interfaces */ 1995 for (intf = 0; intf < hw->npc_intfs; intf++) { 1996 if (is_npc_intf_tx(intf)) 1997 continue; 1998 1999 /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */ 2000 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 2001 rx_kex); 2002 2003 /* If MCAM lookup doesn't result in a match, drop the received 2004 * packet. And map this action to a counter to count dropped 2005 * packets. 2006 */ 2007 rvu_write64(rvu, blkaddr, 2008 NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP); 2009 2010 /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9] 2011 * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0] 2012 */ 2013 rvu_write64(rvu, blkaddr, 2014 NPC_AF_INTFX_MISS_STAT_ACT(intf), 2015 ((mcam->rx_miss_act_cntr >> 9) << 12) | 2016 hw->npc_stat_ena | mcam->rx_miss_act_cntr); 2017 } 2018 2019 /* Configure TX interfaces */ 2020 for (intf = 0; intf < hw->npc_intfs; intf++) { 2021 if (is_npc_intf_rx(intf)) 2022 continue; 2023 2024 /* Extract Ltypes LID_LA to LID_LE */ 2025 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 2026 tx_kex); 2027 2028 /* Set TX miss action to UCAST_DEFAULT i.e 2029 * transmit the packet on NIX LF SQ's default channel. 2030 */ 2031 rvu_write64(rvu, blkaddr, 2032 NPC_AF_INTFX_MISS_ACT(intf), 2033 NIX_TX_ACTIONOP_UCAST_DEFAULT); 2034 } 2035 } 2036 2037 int rvu_npc_init(struct rvu *rvu) 2038 { 2039 struct npc_kpu_profile_adapter *kpu = &rvu->kpu; 2040 struct npc_pkind *pkind = &rvu->hw->pkind; 2041 struct npc_mcam *mcam = &rvu->hw->mcam; 2042 int blkaddr, entry, bank, err; 2043 2044 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2045 if (blkaddr < 0) { 2046 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 2047 return -ENODEV; 2048 } 2049 2050 rvu_npc_hw_init(rvu, blkaddr); 2051 2052 /* First disable all MCAM entries, to stop traffic towards NIXLFs */ 2053 for (bank = 0; bank < mcam->banks; bank++) { 2054 for (entry = 0; entry < mcam->banksize; entry++) 2055 rvu_write64(rvu, blkaddr, 2056 NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); 2057 } 2058 2059 err = rvu_alloc_bitmap(&pkind->rsrc); 2060 if (err) 2061 return err; 2062 /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0', 2063 * no need to configure PKIND for all LBKs separately. 
2064 */ 2065 rvu_alloc_rsrc(&pkind->rsrc); 2066 2067 /* Allocate mem for pkind to PF and channel mapping info */ 2068 pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, 2069 sizeof(u32), GFP_KERNEL); 2070 if (!pkind->pfchan_map) 2071 return -ENOMEM; 2072 2073 /* Configure KPU profile */ 2074 npc_parser_profile_init(rvu, blkaddr); 2075 2076 /* Config Outer L2, IPv4's NPC layer info */ 2077 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2, 2078 (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) | 2079 kpu->lt_def->pck_ol2.ltype_mask); 2080 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, 2081 (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) | 2082 kpu->lt_def->pck_oip4.ltype_mask); 2083 2084 /* Config Inner IPV4 NPC layer info */ 2085 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, 2086 (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) | 2087 kpu->lt_def->pck_iip4.ltype_mask); 2088 2089 /* Enable below for Rx pkts. 2090 * - Outer IPv4 header checksum validation. 2091 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B]. 2092 * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M]. 2093 * - Inner IPv4 header checksum validation. 2094 * - Set non zero checksum error code value 2095 */ 2096 rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, 2097 rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | 2098 ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) | 2099 BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); 2100 2101 rvu_npc_setup_interfaces(rvu, blkaddr); 2102 2103 npc_config_secret_key(rvu, blkaddr); 2104 /* Configure MKEX profile */ 2105 npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); 2106 2107 err = npc_mcam_rsrcs_init(rvu, blkaddr); 2108 if (err) 2109 return err; 2110 2111 err = npc_flow_steering_init(rvu, blkaddr); 2112 if (err) { 2113 dev_err(rvu->dev, 2114 "Incorrect mkex profile loaded using default mkex\n"); 2115 npc_load_mkex_profile(rvu, blkaddr, def_pfl_name); 2116 } 2117 2118 return 0; 2119 } 2120 2121 void rvu_npc_freemem(struct rvu *rvu) 2122 { 2123 struct npc_pkind *pkind = &rvu->hw->pkind; 2124 struct npc_mcam *mcam = &rvu->hw->mcam; 2125 2126 kfree(pkind->rsrc.bmap); 2127 kfree(mcam->counters.bmap); 2128 if (rvu->kpu_prfl_addr) 2129 iounmap(rvu->kpu_prfl_addr); 2130 else 2131 kfree(rvu->kpu_fwdata); 2132 mutex_destroy(&mcam->lock); 2133 } 2134 2135 void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, 2136 int blkaddr, int *alloc_cnt, 2137 int *enable_cnt) 2138 { 2139 struct npc_mcam *mcam = &rvu->hw->mcam; 2140 int entry; 2141 2142 *alloc_cnt = 0; 2143 *enable_cnt = 0; 2144 2145 for (entry = 0; entry < mcam->bmap_entries; entry++) { 2146 if (mcam->entry2pfvf_map[entry] == pcifunc) { 2147 (*alloc_cnt)++; 2148 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry)) 2149 (*enable_cnt)++; 2150 } 2151 } 2152 } 2153 2154 void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, 2155 int blkaddr, int *alloc_cnt, 2156 int *enable_cnt) 2157 { 2158 struct npc_mcam *mcam = &rvu->hw->mcam; 2159 int cntr; 2160 2161 *alloc_cnt = 0; 2162 *enable_cnt = 0; 2163 2164 for (cntr = 0; cntr < mcam->counters.max; cntr++) { 2165 if (mcam->cntr2pfvf_map[cntr] == pcifunc) { 2166 (*alloc_cnt)++; 2167 if (mcam->cntr_refcnt[cntr]) 2168 (*enable_cnt)++; 2169 } 2170 } 2171 } 2172 2173 static int npc_mcam_verify_entry(struct npc_mcam *mcam, 2174 u16 pcifunc, int entry) 2175 { 2176 /* verify AF installed entries */ 2177 if (is_pffunc_af(pcifunc)) 2178 return 0; 2179 /* Verify if entry is 
valid and if it is indeed 2180 * allocated to the requesting PFFUNC. 2181 */ 2182 if (entry >= mcam->bmap_entries) 2183 return NPC_MCAM_INVALID_REQ; 2184 2185 if (pcifunc != mcam->entry2pfvf_map[entry]) 2186 return NPC_MCAM_PERM_DENIED; 2187 2188 return 0; 2189 } 2190 2191 static int npc_mcam_verify_counter(struct npc_mcam *mcam, 2192 u16 pcifunc, int cntr) 2193 { 2194 /* Verify if counter is valid and if it is indeed 2195 * allocated to the requesting PFFUNC. 2196 */ 2197 if (cntr >= mcam->counters.max) 2198 return NPC_MCAM_INVALID_REQ; 2199 2200 if (pcifunc != mcam->cntr2pfvf_map[cntr]) 2201 return NPC_MCAM_PERM_DENIED; 2202 2203 return 0; 2204 } 2205 2206 static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, 2207 int blkaddr, u16 entry, u16 cntr) 2208 { 2209 u16 index = entry & (mcam->banksize - 1); 2210 u32 bank = npc_get_bank(mcam, entry); 2211 struct rvu_hwinfo *hw = rvu->hw; 2212 2213 /* Set mapping and increment counter's refcnt */ 2214 mcam->entry2cntr_map[entry] = cntr; 2215 mcam->cntr_refcnt[cntr]++; 2216 /* Enable stats */ 2217 rvu_write64(rvu, blkaddr, 2218 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 2219 ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr); 2220 } 2221 2222 static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, 2223 struct npc_mcam *mcam, 2224 int blkaddr, u16 entry, u16 cntr) 2225 { 2226 u16 index = entry & (mcam->banksize - 1); 2227 u32 bank = npc_get_bank(mcam, entry); 2228 2229 /* Remove mapping and reduce counter's refcnt */ 2230 mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; 2231 mcam->cntr_refcnt[cntr]--; 2232 /* Disable stats */ 2233 rvu_write64(rvu, blkaddr, 2234 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); 2235 } 2236 2237 /* Sets MCAM entry in bitmap as used. Update 2238 * reverse bitmap too. Should be called with 2239 * 'mcam->lock' held. 2240 */ 2241 static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) 2242 { 2243 u16 entry, rentry; 2244 2245 entry = index; 2246 rentry = mcam->bmap_entries - index - 1; 2247 2248 __set_bit(entry, mcam->bmap); 2249 __set_bit(rentry, mcam->bmap_reverse); 2250 mcam->bmap_fcnt--; 2251 } 2252 2253 /* Sets MCAM entry in bitmap as free. Update 2254 * reverse bitmap too. Should be called with 2255 * 'mcam->lock' held. 
2256 */ 2257 static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) 2258 { 2259 u16 entry, rentry; 2260 2261 entry = index; 2262 rentry = mcam->bmap_entries - index - 1; 2263 2264 __clear_bit(entry, mcam->bmap); 2265 __clear_bit(rentry, mcam->bmap_reverse); 2266 mcam->bmap_fcnt++; 2267 } 2268 2269 static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, 2270 int blkaddr, u16 pcifunc) 2271 { 2272 u16 index, cntr; 2273 2274 /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ 2275 for (index = 0; index < mcam->bmap_entries; index++) { 2276 if (mcam->entry2pfvf_map[index] == pcifunc) { 2277 mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; 2278 /* Free the entry in bitmap */ 2279 npc_mcam_clear_bit(mcam, index); 2280 /* Disable the entry */ 2281 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); 2282 2283 /* Update entry2counter mapping */ 2284 cntr = mcam->entry2cntr_map[index]; 2285 if (cntr != NPC_MCAM_INVALID_MAP) 2286 npc_unmap_mcam_entry_and_cntr(rvu, mcam, 2287 blkaddr, index, 2288 cntr); 2289 mcam->entry2target_pffunc[index] = 0x0; 2290 } 2291 } 2292 } 2293 2294 static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, 2295 u16 pcifunc) 2296 { 2297 u16 cntr; 2298 2299 /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ 2300 for (cntr = 0; cntr < mcam->counters.max; cntr++) { 2301 if (mcam->cntr2pfvf_map[cntr] == pcifunc) { 2302 mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; 2303 mcam->cntr_refcnt[cntr] = 0; 2304 rvu_free_rsrc(&mcam->counters, cntr); 2305 /* This API is expected to be called after freeing 2306 * MCAM entries, which inturn will remove 2307 * 'entry to counter' mapping. 2308 * No need to do it again. 2309 */ 2310 } 2311 } 2312 } 2313 2314 /* Find area of contiguous free entries of size 'nr'. 2315 * If not found return max contiguous free entries available. 2316 */ 2317 static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, 2318 u16 nr, u16 *max_area) 2319 { 2320 u16 max_area_start = 0; 2321 u16 index, next, end; 2322 2323 *max_area = 0; 2324 2325 again: 2326 index = find_next_zero_bit(map, size, start); 2327 if (index >= size) 2328 return max_area_start; 2329 2330 end = ((index + nr) >= size) ? size : index + nr; 2331 next = find_next_bit(map, end, index); 2332 if (*max_area < (next - index)) { 2333 *max_area = next - index; 2334 max_area_start = index; 2335 } 2336 2337 if (next < end) { 2338 start = next + 1; 2339 goto again; 2340 } 2341 2342 return max_area_start; 2343 } 2344 2345 /* Find number of free MCAM entries available 2346 * within range i.e in between 'start' and 'end'. 2347 */ 2348 static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) 2349 { 2350 u16 index, next; 2351 u16 fcnt = 0; 2352 2353 again: 2354 if (start >= end) 2355 return fcnt; 2356 2357 index = find_next_zero_bit(map, end, start); 2358 if (index >= end) 2359 return fcnt; 2360 2361 next = find_next_bit(map, end, index); 2362 if (next <= end) { 2363 fcnt += next - index; 2364 start = next + 1; 2365 goto again; 2366 } 2367 2368 fcnt += end - index; 2369 return fcnt; 2370 } 2371 2372 static void 2373 npc_get_mcam_search_range_priority(struct npc_mcam *mcam, 2374 struct npc_mcam_alloc_entry_req *req, 2375 u16 *start, u16 *end, bool *reverse) 2376 { 2377 u16 fcnt; 2378 2379 if (req->priority == NPC_MCAM_HIGHER_PRIO) 2380 goto hprio; 2381 2382 /* For a low priority entry allocation 2383 * - If reference entry is not in hprio zone then 2384 * search range: ref_entry to end. 
2385 * - If reference entry is in hprio zone and if 2386 * request can be accomodated in non-hprio zone then 2387 * search range: 'start of middle zone' to 'end' 2388 * - else search in reverse, so that less number of hprio 2389 * zone entries are allocated. 2390 */ 2391 2392 *reverse = false; 2393 *start = req->ref_entry + 1; 2394 *end = mcam->bmap_entries; 2395 2396 if (req->ref_entry >= mcam->hprio_end) 2397 return; 2398 2399 fcnt = npc_mcam_get_free_count(mcam->bmap, 2400 mcam->hprio_end, mcam->bmap_entries); 2401 if (fcnt > req->count) 2402 *start = mcam->hprio_end; 2403 else 2404 *reverse = true; 2405 return; 2406 2407 hprio: 2408 /* For a high priority entry allocation, search is always 2409 * in reverse to preserve hprio zone entries. 2410 * - If reference entry is not in lprio zone then 2411 * search range: 0 to ref_entry. 2412 * - If reference entry is in lprio zone and if 2413 * request can be accomodated in middle zone then 2414 * search range: 'hprio_end' to 'lprio_start' 2415 */ 2416 2417 *reverse = true; 2418 *start = 0; 2419 *end = req->ref_entry; 2420 2421 if (req->ref_entry <= mcam->lprio_start) 2422 return; 2423 2424 fcnt = npc_mcam_get_free_count(mcam->bmap, 2425 mcam->hprio_end, mcam->lprio_start); 2426 if (fcnt < req->count) 2427 return; 2428 *start = mcam->hprio_end; 2429 *end = mcam->lprio_start; 2430 } 2431 2432 static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, 2433 struct npc_mcam_alloc_entry_req *req, 2434 struct npc_mcam_alloc_entry_rsp *rsp) 2435 { 2436 u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; 2437 u16 fcnt, hp_fcnt, lp_fcnt; 2438 u16 start, end, index; 2439 int entry, next_start; 2440 bool reverse = false; 2441 unsigned long *bmap; 2442 u16 max_contig; 2443 2444 mutex_lock(&mcam->lock); 2445 2446 /* Check if there are any free entries */ 2447 if (!mcam->bmap_fcnt) { 2448 mutex_unlock(&mcam->lock); 2449 return NPC_MCAM_ALLOC_FAILED; 2450 } 2451 2452 /* MCAM entries are divided into high priority, middle and 2453 * low priority zones. Idea is to not allocate top and lower 2454 * most entries as much as possible, this is to increase 2455 * probability of honouring priority allocation requests. 2456 * 2457 * Two bitmaps are used for mcam entry management, 2458 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'. 2459 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'. 2460 * 2461 * Reverse bitmap is used to allocate entries 2462 * - when a higher priority entry is requested 2463 * - when available free entries are less. 2464 * Lower priority ones out of avaialble free entries are always 2465 * chosen when 'high vs low' question arises. 2466 */ 2467 2468 /* Get the search range for priority allocation request */ 2469 if (req->priority) { 2470 npc_get_mcam_search_range_priority(mcam, req, 2471 &start, &end, &reverse); 2472 goto alloc; 2473 } 2474 2475 /* For a VF base MCAM match rule is set by its PF. And all the 2476 * further MCAM rules installed by VF on its own are 2477 * concatenated with the base rule set by its PF. Hence PF entries 2478 * should be at lower priority compared to VF entries. Otherwise 2479 * base rule is hit always and rules installed by VF will be of 2480 * no use. Hence if the request is from PF and NOT a priority 2481 * allocation request then allocate low priority entries. 2482 */ 2483 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 2484 goto lprio_alloc; 2485 2486 /* Find out the search range for non-priority allocation request 2487 * 2488 * Get MCAM free entry count in middle zone. 
2489 */ 2490 lp_fcnt = npc_mcam_get_free_count(mcam->bmap, 2491 mcam->lprio_start, 2492 mcam->bmap_entries); 2493 hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end); 2494 fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt; 2495 2496 /* Check if request can be accomodated in the middle zone */ 2497 if (fcnt > req->count) { 2498 start = mcam->hprio_end; 2499 end = mcam->lprio_start; 2500 } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) { 2501 /* Expand search zone from half of hprio zone to 2502 * half of lprio zone. 2503 */ 2504 start = mcam->hprio_end / 2; 2505 end = mcam->bmap_entries - (mcam->lprio_count / 2); 2506 reverse = true; 2507 } else { 2508 /* Not enough free entries, search all entries in reverse, 2509 * so that low priority ones will get used up. 2510 */ 2511 lprio_alloc: 2512 reverse = true; 2513 start = 0; 2514 end = mcam->bmap_entries; 2515 } 2516 2517 alloc: 2518 if (reverse) { 2519 bmap = mcam->bmap_reverse; 2520 start = mcam->bmap_entries - start; 2521 end = mcam->bmap_entries - end; 2522 swap(start, end); 2523 } else { 2524 bmap = mcam->bmap; 2525 } 2526 2527 if (req->contig) { 2528 /* Allocate requested number of contiguous entries, if 2529 * unsuccessful find max contiguous entries available. 2530 */ 2531 index = npc_mcam_find_zero_area(bmap, end, start, 2532 req->count, &max_contig); 2533 rsp->count = max_contig; 2534 if (reverse) 2535 rsp->entry = mcam->bmap_entries - index - max_contig; 2536 else 2537 rsp->entry = index; 2538 } else { 2539 /* Allocate requested number of non-contiguous entries, 2540 * if unsuccessful allocate as many as possible. 2541 */ 2542 rsp->count = 0; 2543 next_start = start; 2544 for (entry = 0; entry < req->count; entry++) { 2545 index = find_next_zero_bit(bmap, end, next_start); 2546 if (index >= end) 2547 break; 2548 2549 next_start = start + (index - start) + 1; 2550 2551 /* Save the entry's index */ 2552 if (reverse) 2553 index = mcam->bmap_entries - index - 1; 2554 entry_list[entry] = index; 2555 rsp->count++; 2556 } 2557 } 2558 2559 /* If allocating requested no of entries is unsucessful, 2560 * expand the search range to full bitmap length and retry. 2561 */ 2562 if (!req->priority && (rsp->count < req->count) && 2563 ((end - start) != mcam->bmap_entries)) { 2564 reverse = true; 2565 start = 0; 2566 end = mcam->bmap_entries; 2567 goto alloc; 2568 } 2569 2570 /* For priority entry allocation requests, if allocation is 2571 * failed then expand search to max possible range and retry. 2572 */ 2573 if (req->priority && rsp->count < req->count) { 2574 if (req->priority == NPC_MCAM_LOWER_PRIO && 2575 (start != (req->ref_entry + 1))) { 2576 start = req->ref_entry + 1; 2577 end = mcam->bmap_entries; 2578 reverse = false; 2579 goto alloc; 2580 } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) && 2581 ((end - start) != req->ref_entry)) { 2582 start = 0; 2583 end = req->ref_entry; 2584 reverse = true; 2585 goto alloc; 2586 } 2587 } 2588 2589 /* Copy MCAM entry indices into mbox response entry_list. 2590 * Requester always expects indices in ascending order, so 2591 * reverse the list if reverse bitmap is used for allocation. 
2592 */ 2593 if (!req->contig && rsp->count) { 2594 index = 0; 2595 for (entry = rsp->count - 1; entry >= 0; entry--) { 2596 if (reverse) 2597 rsp->entry_list[index++] = entry_list[entry]; 2598 else 2599 rsp->entry_list[entry] = entry_list[entry]; 2600 } 2601 } 2602 2603 /* Mark the allocated entries as used and set nixlf mapping */ 2604 for (entry = 0; entry < rsp->count; entry++) { 2605 index = req->contig ? 2606 (rsp->entry + entry) : rsp->entry_list[entry]; 2607 npc_mcam_set_bit(mcam, index); 2608 mcam->entry2pfvf_map[index] = pcifunc; 2609 mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; 2610 } 2611 2612 /* Update available free count in mbox response */ 2613 rsp->free_count = mcam->bmap_fcnt; 2614 2615 mutex_unlock(&mcam->lock); 2616 return 0; 2617 } 2618 2619 /* Marks bitmaps to reserved the mcam slot */ 2620 void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) 2621 { 2622 struct npc_mcam *mcam = &rvu->hw->mcam; 2623 2624 npc_mcam_set_bit(mcam, entry_idx); 2625 } 2626 2627 int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, 2628 struct npc_mcam_alloc_entry_req *req, 2629 struct npc_mcam_alloc_entry_rsp *rsp) 2630 { 2631 struct npc_mcam *mcam = &rvu->hw->mcam; 2632 u16 pcifunc = req->hdr.pcifunc; 2633 int blkaddr; 2634 2635 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2636 if (blkaddr < 0) 2637 return NPC_MCAM_INVALID_REQ; 2638 2639 rsp->entry = NPC_MCAM_ENTRY_INVALID; 2640 rsp->free_count = 0; 2641 2642 /* Check if ref_entry is within range */ 2643 if (req->priority && req->ref_entry >= mcam->bmap_entries) { 2644 dev_err(rvu->dev, "%s: reference entry %d is out of range\n", 2645 __func__, req->ref_entry); 2646 return NPC_MCAM_INVALID_REQ; 2647 } 2648 2649 /* ref_entry can't be '0' if requested priority is high. 2650 * Can't be last entry if requested priority is low. 2651 */ 2652 if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || 2653 ((req->ref_entry == (mcam->bmap_entries - 1)) && 2654 req->priority == NPC_MCAM_LOWER_PRIO)) 2655 return NPC_MCAM_INVALID_REQ; 2656 2657 /* Since list of allocated indices needs to be sent to requester, 2658 * max number of non-contiguous entries per mbox msg is limited. 
2659 */ 2660 if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) { 2661 dev_err(rvu->dev, 2662 "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n", 2663 __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES); 2664 return NPC_MCAM_INVALID_REQ; 2665 } 2666 2667 /* Alloc request from PFFUNC with no NIXLF attached should be denied */ 2668 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2669 return NPC_MCAM_ALLOC_DENIED; 2670 2671 return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); 2672 } 2673 2674 int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, 2675 struct npc_mcam_free_entry_req *req, 2676 struct msg_rsp *rsp) 2677 { 2678 struct npc_mcam *mcam = &rvu->hw->mcam; 2679 u16 pcifunc = req->hdr.pcifunc; 2680 int blkaddr, rc = 0; 2681 u16 cntr; 2682 2683 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2684 if (blkaddr < 0) 2685 return NPC_MCAM_INVALID_REQ; 2686 2687 /* Free request from PFFUNC with no NIXLF attached, ignore */ 2688 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2689 return NPC_MCAM_INVALID_REQ; 2690 2691 mutex_lock(&mcam->lock); 2692 2693 if (req->all) 2694 goto free_all; 2695 2696 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2697 if (rc) 2698 goto exit; 2699 2700 mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; 2701 mcam->entry2target_pffunc[req->entry] = 0x0; 2702 npc_mcam_clear_bit(mcam, req->entry); 2703 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2704 2705 /* Update entry2counter mapping */ 2706 cntr = mcam->entry2cntr_map[req->entry]; 2707 if (cntr != NPC_MCAM_INVALID_MAP) 2708 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2709 req->entry, cntr); 2710 2711 goto exit; 2712 2713 free_all: 2714 /* Free up all entries allocated to requesting PFFUNC */ 2715 npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); 2716 exit: 2717 mutex_unlock(&mcam->lock); 2718 return rc; 2719 } 2720 2721 int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu, 2722 struct npc_mcam_read_entry_req *req, 2723 struct npc_mcam_read_entry_rsp *rsp) 2724 { 2725 struct npc_mcam *mcam = &rvu->hw->mcam; 2726 u16 pcifunc = req->hdr.pcifunc; 2727 int blkaddr, rc; 2728 2729 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2730 if (blkaddr < 0) 2731 return NPC_MCAM_INVALID_REQ; 2732 2733 mutex_lock(&mcam->lock); 2734 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2735 if (!rc) { 2736 npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry, 2737 &rsp->entry_data, 2738 &rsp->intf, &rsp->enable); 2739 } 2740 2741 mutex_unlock(&mcam->lock); 2742 return rc; 2743 } 2744 2745 int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, 2746 struct npc_mcam_write_entry_req *req, 2747 struct msg_rsp *rsp) 2748 { 2749 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 2750 struct npc_mcam *mcam = &rvu->hw->mcam; 2751 u16 pcifunc = req->hdr.pcifunc; 2752 int blkaddr, rc; 2753 u8 nix_intf; 2754 2755 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2756 if (blkaddr < 0) 2757 return NPC_MCAM_INVALID_REQ; 2758 2759 mutex_lock(&mcam->lock); 2760 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2761 if (rc) 2762 goto exit; 2763 2764 if (req->set_cntr && 2765 npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { 2766 rc = NPC_MCAM_INVALID_REQ; 2767 goto exit; 2768 } 2769 2770 if (!is_npc_interface_valid(rvu, req->intf)) { 2771 rc = NPC_MCAM_INVALID_REQ; 2772 goto exit; 2773 } 2774 2775 if (is_npc_intf_tx(req->intf)) 2776 nix_intf = pfvf->nix_tx_intf; 2777 else 2778 nix_intf = pfvf->nix_rx_intf; 2779 
2780 if (!is_pffunc_af(pcifunc) && 2781 npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { 2782 rc = NPC_MCAM_INVALID_REQ; 2783 goto exit; 2784 } 2785 2786 /* For AF installed rules, the nix_intf should be set to target NIX */ 2787 if (is_pffunc_af(req->hdr.pcifunc)) 2788 nix_intf = req->intf; 2789 2790 npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, 2791 &req->entry_data, req->enable_entry); 2792 2793 if (req->set_cntr) 2794 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2795 req->entry, req->cntr); 2796 2797 rc = 0; 2798 exit: 2799 mutex_unlock(&mcam->lock); 2800 return rc; 2801 } 2802 2803 int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, 2804 struct npc_mcam_ena_dis_entry_req *req, 2805 struct msg_rsp *rsp) 2806 { 2807 struct npc_mcam *mcam = &rvu->hw->mcam; 2808 u16 pcifunc = req->hdr.pcifunc; 2809 int blkaddr, rc; 2810 2811 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2812 if (blkaddr < 0) 2813 return NPC_MCAM_INVALID_REQ; 2814 2815 mutex_lock(&mcam->lock); 2816 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2817 mutex_unlock(&mcam->lock); 2818 if (rc) 2819 return rc; 2820 2821 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); 2822 2823 return 0; 2824 } 2825 2826 int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, 2827 struct npc_mcam_ena_dis_entry_req *req, 2828 struct msg_rsp *rsp) 2829 { 2830 struct npc_mcam *mcam = &rvu->hw->mcam; 2831 u16 pcifunc = req->hdr.pcifunc; 2832 int blkaddr, rc; 2833 2834 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2835 if (blkaddr < 0) 2836 return NPC_MCAM_INVALID_REQ; 2837 2838 mutex_lock(&mcam->lock); 2839 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2840 mutex_unlock(&mcam->lock); 2841 if (rc) 2842 return rc; 2843 2844 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2845 2846 return 0; 2847 } 2848 2849 int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, 2850 struct npc_mcam_shift_entry_req *req, 2851 struct npc_mcam_shift_entry_rsp *rsp) 2852 { 2853 struct npc_mcam *mcam = &rvu->hw->mcam; 2854 u16 pcifunc = req->hdr.pcifunc; 2855 u16 old_entry, new_entry; 2856 int blkaddr, rc = 0; 2857 u16 index, cntr; 2858 2859 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2860 if (blkaddr < 0) 2861 return NPC_MCAM_INVALID_REQ; 2862 2863 if (req->shift_count > NPC_MCAM_MAX_SHIFTS) 2864 return NPC_MCAM_INVALID_REQ; 2865 2866 mutex_lock(&mcam->lock); 2867 for (index = 0; index < req->shift_count; index++) { 2868 old_entry = req->curr_entry[index]; 2869 new_entry = req->new_entry[index]; 2870 2871 /* Check if both old and new entries are valid and 2872 * does belong to this PFFUNC or not. 
2873 */ 2874 rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); 2875 if (rc) 2876 break; 2877 2878 rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); 2879 if (rc) 2880 break; 2881 2882 /* new_entry should not have a counter mapped */ 2883 if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { 2884 rc = NPC_MCAM_PERM_DENIED; 2885 break; 2886 } 2887 2888 /* Disable the new_entry */ 2889 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); 2890 2891 /* Copy rule from old entry to new entry */ 2892 npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); 2893 2894 /* Copy counter mapping, if any */ 2895 cntr = mcam->entry2cntr_map[old_entry]; 2896 if (cntr != NPC_MCAM_INVALID_MAP) { 2897 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2898 old_entry, cntr); 2899 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2900 new_entry, cntr); 2901 } 2902 2903 /* Enable new_entry and disable old_entry */ 2904 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); 2905 npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); 2906 } 2907 2908 /* If shift has failed then report the failed index */ 2909 if (index != req->shift_count) { 2910 rc = NPC_MCAM_PERM_DENIED; 2911 rsp->failed_entry_idx = index; 2912 } 2913 2914 mutex_unlock(&mcam->lock); 2915 return rc; 2916 } 2917 2918 int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, 2919 struct npc_mcam_alloc_counter_req *req, 2920 struct npc_mcam_alloc_counter_rsp *rsp) 2921 { 2922 struct npc_mcam *mcam = &rvu->hw->mcam; 2923 u16 pcifunc = req->hdr.pcifunc; 2924 u16 max_contig, cntr; 2925 int blkaddr, index; 2926 2927 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2928 if (blkaddr < 0) 2929 return NPC_MCAM_INVALID_REQ; 2930 2931 /* If the request is from a PFFUNC with no NIXLF attached, ignore */ 2932 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2933 return NPC_MCAM_INVALID_REQ; 2934 2935 /* Since list of allocated counter IDs needs to be sent to requester, 2936 * max number of non-contiguous counters per mbox msg is limited. 2937 */ 2938 if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) 2939 return NPC_MCAM_INVALID_REQ; 2940 2941 mutex_lock(&mcam->lock); 2942 2943 /* Check if unused counters are available or not */ 2944 if (!rvu_rsrc_free_count(&mcam->counters)) { 2945 mutex_unlock(&mcam->lock); 2946 return NPC_MCAM_ALLOC_FAILED; 2947 } 2948 2949 rsp->count = 0; 2950 2951 if (req->contig) { 2952 /* Allocate requested number of contiguous counters, if 2953 * unsuccessful find max contiguous entries available. 2954 */ 2955 index = npc_mcam_find_zero_area(mcam->counters.bmap, 2956 mcam->counters.max, 0, 2957 req->count, &max_contig); 2958 rsp->count = max_contig; 2959 rsp->cntr = index; 2960 for (cntr = index; cntr < (index + max_contig); cntr++) { 2961 __set_bit(cntr, mcam->counters.bmap); 2962 mcam->cntr2pfvf_map[cntr] = pcifunc; 2963 } 2964 } else { 2965 /* Allocate requested number of non-contiguous counters, 2966 * if unsuccessful allocate as many as possible. 
2967 */ 2968 for (cntr = 0; cntr < req->count; cntr++) { 2969 index = rvu_alloc_rsrc(&mcam->counters); 2970 if (index < 0) 2971 break; 2972 rsp->cntr_list[cntr] = index; 2973 rsp->count++; 2974 mcam->cntr2pfvf_map[index] = pcifunc; 2975 } 2976 } 2977 2978 mutex_unlock(&mcam->lock); 2979 return 0; 2980 } 2981 2982 int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, 2983 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 2984 { 2985 struct npc_mcam *mcam = &rvu->hw->mcam; 2986 u16 index, entry = 0; 2987 int blkaddr, err; 2988 2989 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2990 if (blkaddr < 0) 2991 return NPC_MCAM_INVALID_REQ; 2992 2993 mutex_lock(&mcam->lock); 2994 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 2995 if (err) { 2996 mutex_unlock(&mcam->lock); 2997 return err; 2998 } 2999 3000 /* Mark counter as free/unused */ 3001 mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; 3002 rvu_free_rsrc(&mcam->counters, req->cntr); 3003 3004 /* Disable all MCAM entry's stats which are using this counter */ 3005 while (entry < mcam->bmap_entries) { 3006 if (!mcam->cntr_refcnt[req->cntr]) 3007 break; 3008 3009 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 3010 if (index >= mcam->bmap_entries) 3011 break; 3012 entry = index + 1; 3013 if (mcam->entry2cntr_map[index] != req->cntr) 3014 continue; 3015 3016 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 3017 index, req->cntr); 3018 } 3019 3020 mutex_unlock(&mcam->lock); 3021 return 0; 3022 } 3023 3024 int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, 3025 struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) 3026 { 3027 struct npc_mcam *mcam = &rvu->hw->mcam; 3028 u16 index, entry = 0; 3029 int blkaddr, rc; 3030 3031 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3032 if (blkaddr < 0) 3033 return NPC_MCAM_INVALID_REQ; 3034 3035 mutex_lock(&mcam->lock); 3036 rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3037 if (rc) 3038 goto exit; 3039 3040 /* Unmap the MCAM entry and counter */ 3041 if (!req->all) { 3042 rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); 3043 if (rc) 3044 goto exit; 3045 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 3046 req->entry, req->cntr); 3047 goto exit; 3048 } 3049 3050 /* Disable all MCAM entry's stats which are using this counter */ 3051 while (entry < mcam->bmap_entries) { 3052 if (!mcam->cntr_refcnt[req->cntr]) 3053 break; 3054 3055 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 3056 if (index >= mcam->bmap_entries) 3057 break; 3058 entry = index + 1; 3059 3060 if (mcam->entry2cntr_map[index] != req->cntr) 3061 continue; 3062 3063 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 3064 index, req->cntr); 3065 } 3066 exit: 3067 mutex_unlock(&mcam->lock); 3068 return rc; 3069 } 3070 3071 int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, 3072 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 3073 { 3074 struct npc_mcam *mcam = &rvu->hw->mcam; 3075 int blkaddr, err; 3076 3077 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3078 if (blkaddr < 0) 3079 return NPC_MCAM_INVALID_REQ; 3080 3081 mutex_lock(&mcam->lock); 3082 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3083 mutex_unlock(&mcam->lock); 3084 if (err) 3085 return err; 3086 3087 rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); 3088 3089 return 0; 3090 } 3091 3092 int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, 3093 struct npc_mcam_oper_counter_req *req, 3094 struct 
npc_mcam_oper_counter_rsp *rsp) 3095 { 3096 struct npc_mcam *mcam = &rvu->hw->mcam; 3097 int blkaddr, err; 3098 3099 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3100 if (blkaddr < 0) 3101 return NPC_MCAM_INVALID_REQ; 3102 3103 mutex_lock(&mcam->lock); 3104 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3105 mutex_unlock(&mcam->lock); 3106 if (err) 3107 return err; 3108 3109 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr)); 3110 rsp->stat &= BIT_ULL(48) - 1; 3111 3112 return 0; 3113 } 3114 3115 int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, 3116 struct npc_mcam_alloc_and_write_entry_req *req, 3117 struct npc_mcam_alloc_and_write_entry_rsp *rsp) 3118 { 3119 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 3120 struct npc_mcam_alloc_counter_req cntr_req; 3121 struct npc_mcam_alloc_counter_rsp cntr_rsp; 3122 struct npc_mcam_alloc_entry_req entry_req; 3123 struct npc_mcam_alloc_entry_rsp entry_rsp; 3124 struct npc_mcam *mcam = &rvu->hw->mcam; 3125 u16 entry = NPC_MCAM_ENTRY_INVALID; 3126 u16 cntr = NPC_MCAM_ENTRY_INVALID; 3127 int blkaddr, rc; 3128 u8 nix_intf; 3129 3130 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3131 if (blkaddr < 0) 3132 return NPC_MCAM_INVALID_REQ; 3133 3134 if (!is_npc_interface_valid(rvu, req->intf)) 3135 return NPC_MCAM_INVALID_REQ; 3136 3137 if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 3138 req->hdr.pcifunc)) 3139 return NPC_MCAM_INVALID_REQ; 3140 3141 /* Try to allocate a MCAM entry */ 3142 entry_req.hdr.pcifunc = req->hdr.pcifunc; 3143 entry_req.contig = true; 3144 entry_req.priority = req->priority; 3145 entry_req.ref_entry = req->ref_entry; 3146 entry_req.count = 1; 3147 3148 rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, 3149 &entry_req, &entry_rsp); 3150 if (rc) 3151 return rc; 3152 3153 if (!entry_rsp.count) 3154 return NPC_MCAM_ALLOC_FAILED; 3155 3156 entry = entry_rsp.entry; 3157 3158 if (!req->alloc_cntr) 3159 goto write_entry; 3160 3161 /* Now allocate counter */ 3162 cntr_req.hdr.pcifunc = req->hdr.pcifunc; 3163 cntr_req.contig = true; 3164 cntr_req.count = 1; 3165 3166 rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 3167 if (rc) { 3168 /* Free allocated MCAM entry */ 3169 mutex_lock(&mcam->lock); 3170 mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; 3171 npc_mcam_clear_bit(mcam, entry); 3172 mutex_unlock(&mcam->lock); 3173 return rc; 3174 } 3175 3176 cntr = cntr_rsp.cntr; 3177 3178 write_entry: 3179 mutex_lock(&mcam->lock); 3180 3181 if (is_npc_intf_tx(req->intf)) 3182 nix_intf = pfvf->nix_tx_intf; 3183 else 3184 nix_intf = pfvf->nix_rx_intf; 3185 3186 npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf, 3187 &req->entry_data, req->enable_entry); 3188 3189 if (req->alloc_cntr) 3190 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); 3191 mutex_unlock(&mcam->lock); 3192 3193 rsp->entry = entry; 3194 rsp->cntr = cntr; 3195 3196 return 0; 3197 } 3198 3199 #define GET_KEX_CFG(intf) \ 3200 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) 3201 3202 #define GET_KEX_FLAGS(ld) \ 3203 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) 3204 3205 #define GET_KEX_LD(intf, lid, lt, ld) \ 3206 rvu_read64(rvu, BLKADDR_NPC, \ 3207 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) 3208 3209 #define GET_KEX_LDFLAGS(intf, ld, fl) \ 3210 rvu_read64(rvu, BLKADDR_NPC, \ 3211 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) 3212 3213 int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, 3214 struct 
npc_get_kex_cfg_rsp *rsp) 3215 { 3216 int lid, lt, ld, fl; 3217 3218 rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); 3219 rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); 3220 for (lid = 0; lid < NPC_MAX_LID; lid++) { 3221 for (lt = 0; lt < NPC_MAX_LT; lt++) { 3222 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3223 rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = 3224 GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); 3225 rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = 3226 GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); 3227 } 3228 } 3229 } 3230 for (ld = 0; ld < NPC_MAX_LD; ld++) 3231 rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld); 3232 3233 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3234 for (fl = 0; fl < NPC_MAX_LFL; fl++) { 3235 rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = 3236 GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); 3237 rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = 3238 GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); 3239 } 3240 } 3241 memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); 3242 return 0; 3243 } 3244 3245 static int 3246 npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, 3247 u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) 3248 { 3249 struct npc_kpu_action0 *act0; 3250 u8 shift_count = 0; 3251 int blkaddr; 3252 u64 val; 3253 3254 if (!var_len_off_mask) 3255 return -EINVAL; 3256 3257 if (var_len_off_mask != 0xff) { 3258 if (shift_dir) 3259 shift_count = __ffs(var_len_off_mask); 3260 else 3261 shift_count = (8 - __fls(var_len_off_mask)); 3262 } 3263 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); 3264 if (blkaddr < 0) { 3265 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 3266 return -EINVAL; 3267 } 3268 val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); 3269 act0 = (struct npc_kpu_action0 *)&val; 3270 act0->var_len_shift = shift_count; 3271 act0->var_len_right = shift_dir; 3272 act0->var_len_mask = var_len_off_mask; 3273 act0->var_len_offset = var_len_off; 3274 rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); 3275 return 0; 3276 } 3277 3278 int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, 3279 u64 pkind, u8 var_len_off, u8 var_len_off_mask, 3280 u8 shift_dir) 3281 3282 { 3283 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3284 int blkaddr, nixlf, rc, intf_mode; 3285 int pf = rvu_get_pf(pcifunc); 3286 u64 rxpkind, txpkind; 3287 u8 cgx_id, lmac_id; 3288 3289 /* use default pkind to disable edsa/higig */ 3290 rxpkind = rvu_npc_get_pkind(rvu, pf); 3291 txpkind = NPC_TX_DEF_PKIND; 3292 intf_mode = NPC_INTF_MODE_DEF; 3293 3294 if (mode & OTX2_PRIV_FLAGS_CUSTOM) { 3295 if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { 3296 rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, 3297 var_len_off, 3298 var_len_off_mask, 3299 shift_dir); 3300 if (rc) 3301 return rc; 3302 } 3303 rxpkind = pkind; 3304 txpkind = pkind; 3305 } 3306 3307 if (dir & PKIND_RX) { 3308 /* rx pkind set req valid only for cgx mapped PFs */ 3309 if (!is_cgx_config_permitted(rvu, pcifunc)) 3310 return 0; 3311 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 3312 3313 rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, 3314 rxpkind); 3315 if (rc) 3316 return rc; 3317 } 3318 3319 if (dir & PKIND_TX) { 3320 /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ 3321 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3322 if (rc) 3323 return rc; 3324 3325 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), 3326 txpkind); 3327 } 3328 3329 pfvf->intf_mode = intf_mode; 3330 return 0; 3331 } 3332 3333 int rvu_mbox_handler_npc_set_pkind(struct rvu 
*rvu, struct npc_set_pkind *req, 3334 struct msg_rsp *rsp) 3335 { 3336 return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, 3337 req->dir, req->pkind, req->var_len_off, 3338 req->var_len_off_mask, req->shift_dir); 3339 } 3340 3341 int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, 3342 struct msg_req *req, 3343 struct npc_mcam_read_base_rule_rsp *rsp) 3344 { 3345 struct npc_mcam *mcam = &rvu->hw->mcam; 3346 int index, blkaddr, nixlf, rc = 0; 3347 u16 pcifunc = req->hdr.pcifunc; 3348 struct rvu_pfvf *pfvf; 3349 u8 intf, enable; 3350 3351 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3352 if (blkaddr < 0) 3353 return NPC_MCAM_INVALID_REQ; 3354 3355 /* Return the channel number in case of PF */ 3356 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 3357 pfvf = rvu_get_pfvf(rvu, pcifunc); 3358 rsp->entry.kw[0] = pfvf->rx_chan_base; 3359 rsp->entry.kw_mask[0] = 0xFFFULL; 3360 goto out; 3361 } 3362 3363 /* Find the pkt steering rule installed by PF to this VF */ 3364 mutex_lock(&mcam->lock); 3365 for (index = 0; index < mcam->bmap_entries; index++) { 3366 if (mcam->entry2target_pffunc[index] == pcifunc) 3367 goto read_entry; 3368 } 3369 3370 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3371 if (rc < 0) { 3372 mutex_unlock(&mcam->lock); 3373 goto out; 3374 } 3375 /* Read the default ucast entry if there is no pkt steering rule */ 3376 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 3377 NIXLF_UCAST_ENTRY); 3378 read_entry: 3379 /* Read the mcam entry */ 3380 npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, 3381 &enable); 3382 mutex_unlock(&mcam->lock); 3383 out: 3384 return rc; 3385 } 3386 3387 int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, 3388 struct npc_mcam_get_stats_req *req, 3389 struct npc_mcam_get_stats_rsp *rsp) 3390 { 3391 struct npc_mcam *mcam = &rvu->hw->mcam; 3392 u16 index, cntr; 3393 int blkaddr; 3394 u64 regval; 3395 u32 bank; 3396 3397 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3398 if (blkaddr < 0) 3399 return NPC_MCAM_INVALID_REQ; 3400 3401 mutex_lock(&mcam->lock); 3402 3403 index = req->entry & (mcam->banksize - 1); 3404 bank = npc_get_bank(mcam, req->entry); 3405 3406 /* read MCAM entry STAT_ACT register */ 3407 regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); 3408 3409 if (!(regval & rvu->hw->npc_stat_ena)) { 3410 rsp->stat_ena = 0; 3411 mutex_unlock(&mcam->lock); 3412 return 0; 3413 } 3414 3415 cntr = regval & 0x1FF; 3416 3417 rsp->stat_ena = 1; 3418 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); 3419 rsp->stat &= BIT_ULL(48) - 1; 3420 3421 mutex_unlock(&mcam->lock); 3422 3423 return 0; 3424 } 3425