// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"

#define RSVD_MCAM_ENTRIES_PER_PF	3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */

#define NPC_PARSE_RESULT_DMAC_OFFSET	8
#define NPC_HW_TSTAMP_OFFSET		8
#define NPC_KEX_CHAN_MASK		0xFFFULL
#define NPC_KEX_PF_FUNC_MASK		0xFFFFULL

/* Round up to the next multiple of 8 bytes (&-8 == &~7). */
#define ALIGN_8B_CEIL(__a)	(((__a) + 7) & (-8))

static const char def_pfl_name[] = "default";

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc);

/* NPC interface numbering encodes direction in bit 0:
 * bit 0 set => TX interface, bit 0 clear => RX interface.
 */
bool is_npc_intf_tx(u8 intf)
{
	return !!(intf & 0x1);
}

bool is_npc_intf_rx(u8 intf)
{
	return !(intf & 0x1);
}

/* An interface id is valid only if it is below the number of NPC
 * interfaces discovered for this silicon.
 */
bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
{
	struct rvu_hwinfo *hw = rvu->hw;

	return intf < hw->npc_intfs;
}

int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
{
	/* Due to a HW issue in these silicon versions, parse nibble enable
	 * configuration has to be identical for both Rx and Tx interfaces.
	 */
	if (is_rvu_96xx_B0(rvu))
		return nibble_ena;
	return 0;
}

/* Validate the PF_FUNC encoded in a TX MCAM entry's key: it must be
 * exact-matched (full 16-bit mask) and must belong to the same PF as
 * the requester @pcifunc.  RX entries are not checked.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */
static int npc_mcam_verify_pf_func(struct rvu *rvu,
				   struct mcam_entry *entry_data, u8 intf,
				   u16 pcifunc)
{
	u16 pf_func, pf_func_mask;

	if (is_npc_intf_rx(intf))
		return 0;

	pf_func_mask = (entry_data->kw_mask[0] >> 32) &
			NPC_KEX_PF_FUNC_MASK;
	pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;

	/* PF_FUNC is held in big-endian byte order inside the key;
	 * convert before comparing with the host-order 'pcifunc'.
	 */
	pf_func = be16_to_cpu((__force __be16)pf_func);
	if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
	    ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
	     (pcifunc & ~RVU_PFVF_FUNC_MASK)))
		return -EINVAL;

	return 0;
}

/* Validate that an RX rule's channel lies inside the channel range of
 * the CGX LMAC (or LBK, for AF's VFs) mapped to @pcifunc.  TX rules and
 * AF-installed rules are accepted unconditionally.
 *
 * NOTE(review): for the LBK case 'end' is the channel count returned by
 * rvu_get_num_lbk_chans() and the test is 'channel > end' — verify
 * whether this upper bound should be exclusive (count - 1).
 */
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	int base = 0, end;

	if (is_npc_intf_tx(intf))
		return 0;

	/* return in case of AF installed rules */
	if (is_pffunc_af(pcifunc))
		return 0;

	if (is_afvf(pcifunc)) {
		end = rvu_get_num_lbk_chans();
		if (end < 0)
			return -EINVAL;
	} else {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0);
		/* CGX mapped functions has maximum of 16 channels */
		end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF);
	}

	if (channel < base || channel > end)
		return -EINVAL;

	return 0;
}

/* Program the CPI default register of @pkind.  (@pfvf is currently
 * unused.)  Bit 62 — presumably the CPI-calculation enable for this
 * pkind; confirm against the NPC HW specification.
 */
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
	int blkaddr;
	u64 val = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Config CPI base for the PKIND */
	val = pkind | 1ULL << 62;
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
}

/* Return the pkind mapped to PF @pf, or -1 when no mapping exists.
 * The PF id is held in bits <21:16> of each pfchan_map entry.
 */
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	u32 map;
	int i;

	for (i = 0; i < pkind->rsrc.max; i++) {
		map = pkind->pfchan_map[i];
		if (((map >> 16) & 0x3F) == pf)
			return i;
	}
	return -1;
}

#define NPC_AF_ACTION0_PTR_ADVANCE	GENMASK_ULL(27, 20)

/* Configure the parser to skip (or stop skipping) the 8-byte HW
 * timestamp prepended to packets, by adjusting the pointer-advance
 * field of the pkind mapped to PF @pf.
 *
 * Returns 0 on success, -EINVAL if the pkind is not mapped or the NPC
 * block is absent.
 */
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{
	int pkind, blkaddr;
	u64 val;

	pkind = rvu_npc_get_pkind(rvu, pf);
	if (pkind < 0) {
		dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
		return -EINVAL;
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
	val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
	/* If timestamp is enabled then configure NPC to shift 8 bytes */
	if (enable)
		val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
				  NPC_HW_TSTAMP_OFFSET);
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);

	return 0;
}

static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
				    int nixlf)
{
	struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
	struct rvu *rvu = hw->rvu;
	int blkaddr = 0, max = 0;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* Given a PF/VF and NIX LF number calculate the unicast mcam
	 * entry index based on the NIX block assigned to the PF/VF.
	 */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		if (pfvf->nix_blkaddr == blkaddr)
			break;
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
}

/* Return the reserved MCAM entry index for the given PF/VF and entry
 * @type.  Each PF (PF0 excluded) owns RSVD_MCAM_ENTRIES_PER_PF entries
 * laid out as bcast, allmulti, promisc; everything else (VFs, ucast)
 * falls back to the per-NIXLF unicast entry.
 */
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
			     u16 pcifunc, int nixlf, int type)
{
	int pf = rvu_get_pf(pcifunc);
	int index;

	/* Check if this is for a PF */
	if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Reserved entries exclude PF0 */
		pf--;
		index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
		/* Broadcast address matching entry should be first so
		 * that the packet can be replicated to all VFs.
		 */
		if (type == NIXLF_BCAST_ENTRY)
			return index;
		else if (type == NIXLF_ALLMULTI_ENTRY)
			return index + 1;
		else if (type == NIXLF_PROMISC_ENTRY)
			return index + 2;
	}

	return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}

/* Map a global MCAM entry index to the bank holding its first slice. */
int npc_get_bank(struct npc_mcam *mcam, int index)
{
	int bank = index / mcam->banksize;

	/* 0,1 & 2,3 banks are combined for this keysize */
	if (mcam->keysize == NPC_MCAM_KEY_X2)
		return bank ? 2 : 0;

	return bank;
}

/* An entry is enabled iff bit 0 of its bank CFG register is set. */
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	u64 cfg;

	index &= (mcam->banksize - 1);
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
	return (cfg & 1);
}

/* Enable or disable an entry in every bank it spans. */
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
			    enable ? 1 : 0);
	}
}

/* Zero the INTF/W0/W1 CAM register pairs of an entry in every bank it
 * spans, leaving it in a clean "match nothing" state.
 */
static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				 int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
	}
}

/* Slice the software mcam_entry key/mask words into the CAM0/CAM1
 * register values for CAM word @idx (see the layout comments per case).
 */
static void npc_get_keyword(struct mcam_entry *entry, int idx,
			    u64 *cam0, u64 *cam1)
{
	u64 kw_mask = 0x00;

#define CAM_MASK(n)	(BIT_ULL(n) - 1)

	/* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
	 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
	 *
	 * Also, only 48 bits of BANKX_CAMX_W1 are valid.
	 */
	switch (idx) {
	case 0:
		/* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
		*cam1 = entry->kw[0];
		kw_mask = entry->kw_mask[0];
		break;
	case 1:
		/* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
		*cam1 = entry->kw[1] & CAM_MASK(48);
		kw_mask = entry->kw_mask[1] & CAM_MASK(48);
		break;
	case 2:
		/* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
		 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
		 */
		*cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
		kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
		break;
	case 3:
		/* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
		 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
		 */
		*cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
		kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
		break;
	case 4:
		/* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
		 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
		 */
		*cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
		kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
		break;
	case 5:
		/* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
		 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
		 */
		*cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
		kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
		break;
	case 6:
		/* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
		 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
		 */
		*cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
		*cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
		kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
		kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
		break;
	case 7:
		/* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
		*cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
		kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
		break;
	}

	/* CAM1 holds key bits that must be 1; CAM0 those that must be 0.
	 * Bits outside kw_mask end up 0 in both => don't care.
	 */
	*cam1 &= kw_mask;
	*cam0 = ~*cam1 & kw_mask;
}

static void npc_fill_entryword(struct mcam_entry *entry, int idx,
			       u64 cam0, u64 cam1)
{
	/* Similar to npc_get_keyword, but fills mcam_entry structure from
	 * CAM registers.
	 */
	switch (idx) {
	case 0:
		entry->kw[0] = cam1;
		entry->kw_mask[0] = cam1 ^ cam0;
		break;
	case 1:
		entry->kw[1] = cam1;
		entry->kw_mask[1] = cam1 ^ cam0;
		break;
	case 2:
		entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
		entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
		break;
	case 3:
		entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
		entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
		entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
		entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
		break;
	case 4:
		entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
		entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
		break;
	case 5:
		entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
		entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
		entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
		entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
		break;
	case 6:
		entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
		entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
		break;
	case 7:
		entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
		entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
		break;
	}
}

/* Read back the action programmed in @pf_func's reserved unicast entry. */
static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
					int blkaddr, u16 pf_func)
{
	int bank, nixlf, index;

	/* get ucast entry rule entry index */
	nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
	index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
					 NIXLF_UCAST_ENTRY);
	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

/* When a PF installs an RX rule on behalf of one of its VFs, record the
 * target VF for the entry, keep the rule disabled until the VF's NIXLF
 * is attached and initialized, and mirror the VF's default (ucast)
 * action into the entry.
 */
static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
			      int blkaddr, int index, struct mcam_entry *entry,
			      bool *enable)
{
	u16 owner, target_func;
	struct rvu_pfvf *pfvf;
	u64 rx_action;

	owner = mcam->entry2pfvf_map[index];
	target_func = (entry->action >> 4) & 0xffff;
	/* do nothing when target is LBK/PF or owner is not PF */
	if (is_pffunc_af(owner) || is_afvf(target_func) ||
	    (owner & RVU_PFVF_FUNC_MASK) ||
	    !(target_func & RVU_PFVF_FUNC_MASK))
		return;

	/* save entry2target_pffunc */
	pfvf = rvu_get_pfvf(rvu, target_func);
	mcam->entry2target_pffunc[index] = target_func;

	/* don't enable rule when nixlf not attached or initialized */
	if (!(is_nixlf_attached(rvu, target_func) &&
	      test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
		*enable = false;

	/* copy VF default entry action to the VF mcam entry */
	rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
						 target_func);
	if (rx_action)
		entry->action = rx_action;
}

/* Program one MCAM entry: the entry is disabled and cleared first, then
 * the interface, key, action and vtag action are written, and finally
 * the entry is (re-)enabled if requested.
 */
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index, u8 intf,
				  struct mcam_entry *entry, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int kw = 0, actbank, actindex;
	u8 tx_intf_mask = ~intf & 0x3;
	u8 tx_intf = intf;
	u64 cam0,
cam1; 475 476 actbank = bank; /* Save bank id, to set action later on */ 477 actindex = index; 478 index &= (mcam->banksize - 1); 479 480 /* Disable before mcam entry update */ 481 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); 482 483 /* Clear mcam entry to avoid writes being suppressed by NPC */ 484 npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex); 485 486 /* CAM1 takes the comparison value and 487 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. 488 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 489 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 490 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare. 491 */ 492 for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { 493 /* Interface should be set in all banks */ 494 if (is_npc_intf_tx(intf)) { 495 /* Last bit must be set and rest don't care 496 * for TX interfaces 497 */ 498 tx_intf_mask = 0x1; 499 tx_intf = intf & tx_intf_mask; 500 tx_intf_mask = ~tx_intf & tx_intf_mask; 501 } 502 503 rvu_write64(rvu, blkaddr, 504 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 505 tx_intf); 506 rvu_write64(rvu, blkaddr, 507 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 508 tx_intf_mask); 509 510 /* Set the match key */ 511 npc_get_keyword(entry, kw, &cam0, &cam1); 512 rvu_write64(rvu, blkaddr, 513 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1); 514 rvu_write64(rvu, blkaddr, 515 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0); 516 517 npc_get_keyword(entry, kw + 1, &cam0, &cam1); 518 rvu_write64(rvu, blkaddr, 519 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1); 520 rvu_write64(rvu, blkaddr, 521 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); 522 } 523 524 /* PF installing VF rule */ 525 if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries) 526 npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable); 527 528 /* Set 'action' */ 529 rvu_write64(rvu, blkaddr, 530 NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); 531 532 /* Set TAG 
'action' */ 533 rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank), 534 entry->vtag_action); 535 536 /* Enable the entry */ 537 if (enable) 538 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); 539 } 540 541 void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 542 int blkaddr, u16 src, 543 struct mcam_entry *entry, u8 *intf, u8 *ena) 544 { 545 int sbank = npc_get_bank(mcam, src); 546 int bank, kw = 0; 547 u64 cam0, cam1; 548 549 src &= (mcam->banksize - 1); 550 bank = sbank; 551 552 for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) { 553 cam1 = rvu_read64(rvu, blkaddr, 554 NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1)); 555 cam0 = rvu_read64(rvu, blkaddr, 556 NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0)); 557 npc_fill_entryword(entry, kw, cam0, cam1); 558 559 cam1 = rvu_read64(rvu, blkaddr, 560 NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1)); 561 cam0 = rvu_read64(rvu, blkaddr, 562 NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0)); 563 npc_fill_entryword(entry, kw + 1, cam0, cam1); 564 } 565 566 entry->action = rvu_read64(rvu, blkaddr, 567 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); 568 entry->vtag_action = 569 rvu_read64(rvu, blkaddr, 570 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); 571 *intf = rvu_read64(rvu, blkaddr, 572 NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3; 573 *ena = rvu_read64(rvu, blkaddr, 574 NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1; 575 } 576 577 static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 578 int blkaddr, u16 src, u16 dest) 579 { 580 int dbank = npc_get_bank(mcam, dest); 581 int sbank = npc_get_bank(mcam, src); 582 u64 cfg, sreg, dreg; 583 int bank, i; 584 585 src &= (mcam->banksize - 1); 586 dest &= (mcam->banksize - 1); 587 588 /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ 589 for (bank = 0; bank < mcam->banks_per_entry; bank++) { 590 sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); 591 dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); 592 for (i 
= 0; i < 6; i++) { 593 cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); 594 rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); 595 } 596 } 597 598 /* Copy action */ 599 cfg = rvu_read64(rvu, blkaddr, 600 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); 601 rvu_write64(rvu, blkaddr, 602 NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); 603 604 /* Copy TAG action */ 605 cfg = rvu_read64(rvu, blkaddr, 606 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); 607 rvu_write64(rvu, blkaddr, 608 NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); 609 610 /* Enable or disable */ 611 cfg = rvu_read64(rvu, blkaddr, 612 NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); 613 rvu_write64(rvu, blkaddr, 614 NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); 615 } 616 617 static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, 618 int blkaddr, int index) 619 { 620 int bank = npc_get_bank(mcam, index); 621 622 index &= (mcam->banksize - 1); 623 return rvu_read64(rvu, blkaddr, 624 NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); 625 } 626 627 void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, 628 int nixlf, u64 chan, u8 *mac_addr) 629 { 630 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 631 struct npc_install_flow_req req = { 0 }; 632 struct npc_install_flow_rsp rsp = { 0 }; 633 struct npc_mcam *mcam = &rvu->hw->mcam; 634 struct nix_rx_action action; 635 int blkaddr, index; 636 637 /* AF's VFs work in promiscuous mode */ 638 if (is_afvf(pcifunc)) 639 return; 640 641 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 642 if (blkaddr < 0) 643 return; 644 645 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 646 nixlf, NIXLF_UCAST_ENTRY); 647 648 /* Don't change the action if entry is already enabled 649 * Otherwise RSS action may get overwritten. 
650 */ 651 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { 652 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 653 blkaddr, index); 654 } else { 655 *(u64 *)&action = 0x00; 656 action.op = NIX_RX_ACTIONOP_UCAST; 657 action.pf_func = pcifunc; 658 } 659 660 req.default_rule = 1; 661 ether_addr_copy(req.packet.dmac, mac_addr); 662 eth_broadcast_addr((u8 *)&req.mask.dmac); 663 req.features = BIT_ULL(NPC_DMAC); 664 req.channel = chan; 665 req.chan_mask = 0xFFFU; 666 req.intf = pfvf->nix_rx_intf; 667 req.op = action.op; 668 req.hdr.pcifunc = 0; /* AF is requester */ 669 req.vf = action.pf_func; 670 req.index = action.index; 671 req.match_id = action.match_id; 672 req.flow_key_alg = action.flow_key_alg; 673 674 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 675 } 676 677 void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, 678 int nixlf, u64 chan, u8 chan_cnt) 679 { 680 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 681 struct npc_install_flow_req req = { 0 }; 682 struct npc_install_flow_rsp rsp = { 0 }; 683 struct npc_mcam *mcam = &rvu->hw->mcam; 684 struct rvu_hwinfo *hw = rvu->hw; 685 int blkaddr, ucast_idx, index; 686 struct nix_rx_action action; 687 u64 relaxed_mask; 688 689 if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) 690 return; 691 692 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 693 if (blkaddr < 0) 694 return; 695 696 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 697 nixlf, NIXLF_PROMISC_ENTRY); 698 699 if (is_cgx_vf(rvu, pcifunc)) 700 index = npc_get_nixlf_mcam_index(mcam, 701 pcifunc & ~RVU_PFVF_FUNC_MASK, 702 nixlf, NIXLF_PROMISC_ENTRY); 703 704 /* If the corresponding PF's ucast action is RSS, 705 * use the same action for promisc also 706 */ 707 ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, 708 nixlf, NIXLF_UCAST_ENTRY); 709 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) 710 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 711 blkaddr, ucast_idx); 712 713 if (action.op != NIX_RX_ACTIONOP_RSS) 
{ 714 *(u64 *)&action = 0x00; 715 action.op = NIX_RX_ACTIONOP_UCAST; 716 } 717 718 /* RX_ACTION set to MCAST for CGX PF's */ 719 if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && 720 is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { 721 *(u64 *)&action = 0x00; 722 action.op = NIX_RX_ACTIONOP_MCAST; 723 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 724 action.index = pfvf->promisc_mce_idx; 725 } 726 727 req.chan_mask = 0xFFFU; 728 if (chan_cnt > 1) { 729 if (!is_power_of_2(chan_cnt)) { 730 dev_err(rvu->dev, 731 "%s: channel count more than 1, must be power of 2\n", __func__); 732 return; 733 } 734 relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1, 735 ilog2(chan_cnt)); 736 req.chan_mask &= relaxed_mask; 737 } 738 739 req.channel = chan; 740 req.intf = pfvf->nix_rx_intf; 741 req.entry = index; 742 req.op = action.op; 743 req.hdr.pcifunc = 0; /* AF is requester */ 744 req.vf = pcifunc; 745 req.index = action.index; 746 req.match_id = action.match_id; 747 req.flow_key_alg = action.flow_key_alg; 748 749 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 750 } 751 752 void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, 753 int nixlf, bool enable) 754 { 755 struct npc_mcam *mcam = &rvu->hw->mcam; 756 int blkaddr, index; 757 758 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 759 if (blkaddr < 0) 760 return; 761 762 /* Get 'pcifunc' of PF device */ 763 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 764 765 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 766 nixlf, NIXLF_PROMISC_ENTRY); 767 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 768 } 769 770 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, 771 int nixlf, u64 chan) 772 { 773 struct rvu_pfvf *pfvf; 774 struct npc_install_flow_req req = { 0 }; 775 struct npc_install_flow_rsp rsp = { 0 }; 776 struct npc_mcam *mcam = &rvu->hw->mcam; 777 struct rvu_hwinfo *hw = rvu->hw; 778 int blkaddr, index; 779 780 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 781 if (blkaddr < 0) 782 
return; 783 784 /* Skip LBK VFs */ 785 if (is_afvf(pcifunc)) 786 return; 787 788 /* If pkt replication is not supported, 789 * then only PF is allowed to add a bcast match entry. 790 */ 791 if (!hw->cap.nix_rx_multicast && is_vf(pcifunc)) 792 return; 793 794 /* Get 'pcifunc' of PF device */ 795 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 796 pfvf = rvu_get_pfvf(rvu, pcifunc); 797 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 798 nixlf, NIXLF_BCAST_ENTRY); 799 800 if (!hw->cap.nix_rx_multicast) { 801 /* Early silicon doesn't support pkt replication, 802 * so install entry with UCAST action, so that PF 803 * receives all broadcast packets. 804 */ 805 req.op = NIX_RX_ACTIONOP_UCAST; 806 } else { 807 req.op = NIX_RX_ACTIONOP_MCAST; 808 req.index = pfvf->bcast_mce_idx; 809 } 810 811 eth_broadcast_addr((u8 *)&req.packet.dmac); 812 eth_broadcast_addr((u8 *)&req.mask.dmac); 813 req.features = BIT_ULL(NPC_DMAC); 814 req.channel = chan; 815 req.chan_mask = 0xFFFU; 816 req.intf = pfvf->nix_rx_intf; 817 req.entry = index; 818 req.hdr.pcifunc = 0; /* AF is requester */ 819 req.vf = pcifunc; 820 821 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 822 } 823 824 void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 825 bool enable) 826 { 827 struct npc_mcam *mcam = &rvu->hw->mcam; 828 int blkaddr, index; 829 830 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 831 if (blkaddr < 0) 832 return; 833 834 /* Get 'pcifunc' of PF device */ 835 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 836 837 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 838 NIXLF_BCAST_ENTRY); 839 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 840 } 841 842 void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 843 u64 chan) 844 { 845 struct npc_install_flow_req req = { 0 }; 846 struct npc_install_flow_rsp rsp = { 0 }; 847 struct npc_mcam *mcam = &rvu->hw->mcam; 848 struct rvu_hwinfo *hw = rvu->hw; 849 int blkaddr, ucast_idx, index; 850 u8 mac_addr[ETH_ALEN] 
= { 0 }; 851 struct nix_rx_action action; 852 struct rvu_pfvf *pfvf; 853 u16 vf_func; 854 855 /* Only CGX PF/VF can add allmulticast entry */ 856 if (is_afvf(pcifunc)) 857 return; 858 859 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 860 if (blkaddr < 0) 861 return; 862 863 /* Get 'pcifunc' of PF device */ 864 vf_func = pcifunc & RVU_PFVF_FUNC_MASK; 865 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 866 pfvf = rvu_get_pfvf(rvu, pcifunc); 867 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 868 nixlf, NIXLF_ALLMULTI_ENTRY); 869 870 /* If the corresponding PF's ucast action is RSS, 871 * use the same action for multicast entry also 872 */ 873 ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, 874 nixlf, NIXLF_UCAST_ENTRY); 875 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) 876 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 877 blkaddr, ucast_idx); 878 879 if (action.op != NIX_RX_ACTIONOP_RSS) { 880 *(u64 *)&action = 0x00; 881 action.op = NIX_RX_ACTIONOP_UCAST; 882 action.pf_func = pcifunc; 883 } 884 885 /* RX_ACTION set to MCAST for CGX PF's */ 886 if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { 887 *(u64 *)&action = 0x00; 888 action.op = NIX_RX_ACTIONOP_MCAST; 889 action.index = pfvf->mcast_mce_idx; 890 } 891 892 mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ 893 ether_addr_copy(req.packet.dmac, mac_addr); 894 ether_addr_copy(req.mask.dmac, mac_addr); 895 req.features = BIT_ULL(NPC_DMAC); 896 897 /* For cn10k the upper two bits of the channel number are 898 * cpt channel number. with masking out these bits in the 899 * mcam entry, same entry used for NIX will allow packets 900 * received from cpt for parsing. 
901 */ 902 if (!is_rvu_otx2(rvu)) 903 req.chan_mask = NIX_CHAN_CPT_X2P_MASK; 904 else 905 req.chan_mask = 0xFFFU; 906 907 req.channel = chan; 908 req.intf = pfvf->nix_rx_intf; 909 req.entry = index; 910 req.op = action.op; 911 req.hdr.pcifunc = 0; /* AF is requester */ 912 req.vf = pcifunc | vf_func; 913 req.index = action.index; 914 req.match_id = action.match_id; 915 req.flow_key_alg = action.flow_key_alg; 916 917 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 918 } 919 920 void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 921 bool enable) 922 { 923 struct npc_mcam *mcam = &rvu->hw->mcam; 924 int blkaddr, index; 925 926 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 927 if (blkaddr < 0) 928 return; 929 930 /* Get 'pcifunc' of PF device */ 931 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 932 933 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 934 NIXLF_ALLMULTI_ENTRY); 935 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 936 } 937 938 static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, 939 int blkaddr, u16 pcifunc, u64 rx_action) 940 { 941 int actindex, index, bank; 942 bool enable; 943 944 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 945 return; 946 947 mutex_lock(&mcam->lock); 948 for (index = 0; index < mcam->bmap_entries; index++) { 949 if (mcam->entry2target_pffunc[index] == pcifunc) { 950 bank = npc_get_bank(mcam, index); 951 actindex = index; 952 index &= (mcam->banksize - 1); 953 954 /* read vf flow entry enable status */ 955 enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, 956 actindex); 957 /* disable before mcam entry update */ 958 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, 959 false); 960 /* update 'action' */ 961 rvu_write64(rvu, blkaddr, 962 NPC_AF_MCAMEX_BANKX_ACTION(index, bank), 963 rx_action); 964 if (enable) 965 npc_enable_mcam_entry(rvu, mcam, blkaddr, 966 actindex, true); 967 } 968 } 969 mutex_unlock(&mcam->lock); 970 } 971 972 void rvu_npc_update_flowkey_alg_idx(struct 
rvu *rvu, u16 pcifunc, int nixlf, 973 int group, int alg_idx, int mcam_index) 974 { 975 struct npc_mcam *mcam = &rvu->hw->mcam; 976 struct rvu_hwinfo *hw = rvu->hw; 977 struct nix_rx_action action; 978 int blkaddr, index, bank; 979 struct rvu_pfvf *pfvf; 980 981 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 982 if (blkaddr < 0) 983 return; 984 985 /* Check if this is for reserved default entry */ 986 if (mcam_index < 0) { 987 if (group != DEFAULT_RSS_CONTEXT_GROUP) 988 return; 989 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 990 nixlf, NIXLF_UCAST_ENTRY); 991 } else { 992 /* TODO: validate this mcam index */ 993 index = mcam_index; 994 } 995 996 if (index >= mcam->total_entries) 997 return; 998 999 bank = npc_get_bank(mcam, index); 1000 index &= (mcam->banksize - 1); 1001 1002 *(u64 *)&action = rvu_read64(rvu, blkaddr, 1003 NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); 1004 /* Ignore if no action was set earlier */ 1005 if (!*(u64 *)&action) 1006 return; 1007 1008 action.op = NIX_RX_ACTIONOP_RSS; 1009 action.pf_func = pcifunc; 1010 action.index = group; 1011 action.flow_key_alg = alg_idx; 1012 1013 rvu_write64(rvu, blkaddr, 1014 NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); 1015 1016 /* update the VF flow rule action with the VF default entry action */ 1017 if (mcam_index < 0) 1018 npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc, 1019 *(u64 *)&action); 1020 1021 /* update the action change in default rule */ 1022 pfvf = rvu_get_pfvf(rvu, pcifunc); 1023 if (pfvf->def_ucast_rule) 1024 pfvf->def_ucast_rule->rx_action = action; 1025 1026 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 1027 nixlf, NIXLF_PROMISC_ENTRY); 1028 1029 /* If PF's promiscuous entry is enabled, 1030 * Set RSS action for that entry as well 1031 */ 1032 if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && 1033 is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { 1034 bank = npc_get_bank(mcam, index); 1035 index &= (mcam->banksize - 1); 1036 1037 rvu_write64(rvu, blkaddr, 
			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
			    *(u64 *)&action);
	}
}

/* Enable or disable the default MCE (bcast/allmulti/promisc) MCAM entry
 * reserved for the PF owning 'pcifunc', keeping the corresponding MCE
 * replication list in sync on silicons that support packet replication.
 */
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, int type, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_mce_list *mce_list;
	int index, blkaddr, mce_idx;
	struct rvu_pfvf *pfvf;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* These entries are reserved against the PF, so mask out the VF
	 * function bits when computing the MCAM index.
	 */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
					 nixlf, type);

	/* disable MCAM entry when packet replication is not supported by hw */
	if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
		return;
	}

	/* return incase mce list is not enabled */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
	    type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
		return;

	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);

	/* Update the replication list first, then (if enabling) turn the
	 * MCAM entry on so traffic only flows once the list is consistent.
	 */
	nix_update_mce_list(rvu, pcifunc, mce_list,
			    mce_idx, index, enable);
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

/* Enable/disable the reserved unicast entry of this PF/VF and, where packet
 * replication applies, its broadcast MCE membership as well.
 */
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
				       int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast MCAM match entry of this PF/VF */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);

	/* Nothing to do for VFs, on platforms where pkt replication
	 * is not supported
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
		return;

	/* add/delete pf_func to broadcast MCE list */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_BCAST_ENTRY, enable);
}

/* Disable all default (ucast/bcast/allmulti/promisc) entries of a NIX LF. */
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);

	/* Delete multicast and promisc MCAM entries */
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_ALLMULTI_ENTRY, false);
	npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
				     NIXLF_PROMISC_ENTRY, false);
}

void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	/* Enables only broadcast match entry. Promisc/Allmulti are enabled
	 * in set_rx_mode mbox handler.
	 */
	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}

/* Disable every MCAM rule whose RX action targets 'pcifunc', then disable
 * this function's own installed flows and default entries. Called e.g. on
 * NIX LF teardown so no traffic is steered to a dead LF.
 */
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Disable MCAM entries directing traffic to this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == pcifunc &&
		    rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, false);
			rule->enable = false;
			/* Indicate that default rule is disabled */
			if (rule->default_rule) {
				pfvf->def_ucast_rule = NULL;
				list_del(&rule->list);
				kfree(rule);
			}
		}
	}

	mutex_unlock(&mcam->lock);

	npc_mcam_disable_flows(rvu, pcifunc);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

/* Free all MCAM entries, counters and rule bookkeeping owned by 'pcifunc'. */
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct npc_mcam *mcam =
&rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule, *tmp;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Free all MCAM entries owned by this 'pcifunc' */
	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);

	/* Free all MCAM counters owned by this 'pcifunc' */
	npc_mcam_free_all_counters(rvu, mcam, pcifunc);

	/* Delete MCAM entries owned by this 'pcifunc' */
	list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
		if (rule->owner == pcifunc && !rule->default_rule) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	mutex_unlock(&mcam->lock);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

/* Helpers to program MKEX layer-data extraction CSRs. NOTE: 'rvu' and
 * 'blkaddr' are taken from the caller's scope.
 */
#define SET_KEX_LD(intf, lid, ltype, ld, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)

#define SET_KEX_LDFLAGS(intf, ld, flags, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)

/* Program the RX half of an MKEX profile (key config plus LDATA and LFLAGS
 * extraction tables) on interface 'intf'; no-op for TX interfaces.
 */
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_tx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_RX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_RX]
					[ld][fl]);
	}
}

/* TX counterpart of npc_program_mkex_rx(); no-op for RX interfaces. */
static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
				struct npc_mcam_kex *mkex, u8 intf)
{
	int lid, lt, ld, fl;

	if (is_npc_intf_rx(intf))
		return;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
		    mkex->keyx_cfg[NIX_INTF_TX]);

	/* Program LDATA */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				SET_KEX_LD(intf, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
					   [lid][lt][ld]);
		}
	}
	/* Program LFLAGS */
	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++)
			SET_KEX_LDFLAGS(intf, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_TX]
					[ld][fl]);
	}
}

/* Program the given MKEX profile on every NPC interface. */
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
				     struct npc_mcam_kex *mkex)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u8 intf;
	int ld;

	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
			    mkex->kex_ld_flags[ld]);

	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
		npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
	}
}

/* Map the MCAM/MKEX profile image advertised by firmware into kernel
 * address space. On success returns 0 and fills *prfl_img_addr and *size;
 * caller is responsible for iounmap().
 */
static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
				 u64 *size)
{
	u64 prfl_addr, prfl_sz;

	if (!rvu->fwdata)
		return -EINVAL;

	prfl_addr = rvu->fwdata->mcam_addr;
	prfl_sz = rvu->fwdata->mcam_sz;

	if (!prfl_addr || !prfl_sz)
		return -EINVAL;

	*prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
	if (!(*prfl_img_addr))
		return -ENOMEM;

	*size = prfl_sz;

	return 0;
}

/* strtoull of "mkexprof" with base:36 */
#define MKEX_END_SIGN 0xdeadbeef

/* Select and program the MKEX profile: the one named 'mkex_profile' from
 * the firmware database if present and valid, otherwise whatever
 * rvu->kpu.mkex already points at (default or KPU-provided profile).
 */
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
				  const char *mkex_profile)
{
	struct device *dev = &rvu->pdev->dev;
	struct npc_mcam_kex *mcam_kex;
	void __iomem *mkex_prfl_addr = NULL;
	u64 prfl_sz;
	int
ret;

	/* If user not selected mkex profile */
	if (rvu->kpu_fwdata_sz ||
	    !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
		goto program_mkex;

	/* Setting up the mapping for mkex profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto program_mkex;

	mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;

	/* Walk the image until the end marker or the size is exhausted */
	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
		/* Compare with mkex mod_param name string */
		if (mcam_kex->mkex_sign == MKEX_SIGN &&
		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
			/* Due to an errata (35786) in A0/B0 pass silicon,
			 * parse nibble enable configuration has to be
			 * identical for both Rx and Tx interfaces.
			 */
			if (!is_rvu_96xx_B0(rvu) ||
			    mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
				rvu->kpu.mkex = mcam_kex;
			goto program_mkex;
		}

		mcam_kex++;
		prfl_sz -= sizeof(struct npc_mcam_kex);
	}
	dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);

program_mkex:
	dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
	/* Program selected mkex profile */
	npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
	if (mkex_prfl_addr)
		iounmap(mkex_prfl_addr);
}

/* Write one KPU (or, when 'pkind' is true, PKIND initial-KPU) action pair
 * (ACTION1 then ACTION0) from a profile action entry.
 */
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
				 const struct npc_kpu_profile_action *kpuaction,
				 int kpu, int entry, bool pkind)
{
	struct npc_kpu_action0 action0 = {0};
	struct npc_kpu_action1 action1 = {0};
	u64 reg;

	action1.errlev = kpuaction->errlev;
	action1.errcode = kpuaction->errcode;
	action1.dp0_offset = kpuaction->dp0_offset;
	action1.dp1_offset = kpuaction->dp1_offset;
	action1.dp2_offset = kpuaction->dp2_offset;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION1(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

	action0.byp_count = kpuaction->bypass_count;
	action0.capture_ena = kpuaction->cap_ena;
	action0.parse_done = kpuaction->parse_done;
	action0.next_state = kpuaction->next_state;
	action0.capture_lid = kpuaction->lid;
	action0.capture_ltype = kpuaction->ltype;
	action0.capture_flags = kpuaction->flags;
	action0.ptr_advance = kpuaction->ptr_advance;
	action0.var_len_offset = kpuaction->offset;
	action0.var_len_mask = kpuaction->mask;
	action0.var_len_right = kpuaction->right;
	action0.var_len_shift = kpuaction->shift;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION0(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}

/* Program one KPU CAM match entry. CAM1 holds the masked data bits to
 * match, CAM0 the masked inverted bits; bits cleared in the mask become
 * don't-care in hardware.
 */
static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
			      const struct npc_kpu_profile_cam *kpucam,
			      int kpu, int entry)
{
	struct npc_kpu_cam cam0 = {0};
	struct npc_kpu_cam cam1 = {0};

	cam1.state = kpucam->state & kpucam->state_mask;
	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

	cam0.state = ~kpucam->state & kpucam->state_mask;
	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
}

/* Build a 64-bit "entry disable" mask with the low 'count' bits clear
 * (entries 0..count-1 enabled); everything enabled when count >= 64.
 */
static inline u64 enable_mask(int count)
{
	return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
}

/* Program one KPU's CAM and action tables from the profile, clamped to the
 * HW entry count, then enable the programmed entries and the KPU itself.
 */
static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
				    const struct npc_kpu_profile *profile)
{
	int entry, num_entries, max_entries;
	u64 entry_mask;

	if (profile->cam_entries != profile->action_entries) {
		dev_err(rvu->dev,
			"KPU%d: CAM and action entries [%d != %d] not equal\n",
			kpu, profile->cam_entries, profile->action_entries);
	}

	max_entries = rvu->hw->npc_kpu_entries;

	/* Program CAM match entries for previous KPU extracted data */
	num_entries = min_t(int, profile->cam_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpucam(rvu, blkaddr,
				  &profile->cam[entry], kpu, entry);

	/* Program this KPU's actions */
	num_entries = min_t(int, profile->action_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
				     kpu, entry, false);

	/* Enable all programmed entries */
	num_entries = min_t(int, profile->action_entries, profile->cam_entries);
	entry_mask = enable_mask(num_entries);
	/* Disable first KPU_MAX_CST_ENT entries for built-in profile */
	if (!rvu->kpu.custom)
		entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
	if (num_entries > 64) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
			    enable_mask(num_entries - 64));
	}

	/* Enable this KPU */
	rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}

/* Point the KPU adapter at the built-in default parser profile. */
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
	profile->custom = 0;
	profile->name = def_pfl_name;
	profile->version = NPC_KPU_PROFILE_VER;
	profile->ikpu = ikpu_action_entries;
	profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
	profile->kpu =
npc_kpu_profiles;
	profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
	profile->lt_def = &npc_lt_defaults;
	profile->mkex = &npc_mkex_default;

	return 0;
}

/* Validate a firmware-supplied KPU profile (size, signature, version,
 * KPU count) and overlay its CAM/action entries on top of the default
 * profile. Returns -EINVAL when the image cannot be used.
 */
static int npc_apply_custom_kpu(struct rvu *rvu,
				struct npc_kpu_profile_adapter *profile)
{
	size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
	struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
	struct npc_kpu_profile_action *action;
	struct npc_kpu_profile_cam *cam;
	struct npc_kpu_fwdata *fw_kpu;
	int entries;
	u16 kpu, entry;

	if (rvu->kpu_fwdata_sz < hdr_sz) {
		dev_warn(rvu->dev, "Invalid KPU profile size\n");
		return -EINVAL;
	}
	if (le64_to_cpu(fw->signature) != KPU_SIGN) {
		dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
			 fw->signature);
		return -EINVAL;
	}
	/* Verify if the using known profile structure */
	if (NPC_KPU_VER_MAJ(profile->version) >
	    NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile is aligned with the required kernel changes */
	if (NPC_KPU_VER_MIN(profile->version) <
	    NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
		dev_warn(rvu->dev,
			 "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
			 NPC_KPU_VER_MAJ(profile->version),
			 NPC_KPU_VER_MIN(profile->version),
			 NPC_KPU_VER_PATCH(profile->version),
			 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
			 NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
		return -EINVAL;
	}
	/* Verify if profile fits the HW */
	if (fw->kpus > profile->kpus) {
		dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
			 profile->kpus);
		return -EINVAL;
	}

	profile->custom = 1;
	profile->name = fw->name;
	profile->version = le64_to_cpu(fw->version);
	profile->mkex = &fw->mkex;
	profile->lt_def = &fw->lt_def;

	/* Walk the per-KPU blobs: each is a header followed by the CAM
	 * entries and then the action entries.
	 */
	for (kpu = 0; kpu < fw->kpus; kpu++) {
		fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
		if (fw_kpu->entries > KPU_MAX_CST_ENT)
			dev_warn(rvu->dev,
				 "Too many custom entries on KPU%d: %d > %d\n",
				 kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
		entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
		cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
		offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
		action = (struct npc_kpu_profile_action *)(fw->data + offset);
		offset += fw_kpu->entries * sizeof(*action);
		if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
			dev_warn(rvu->dev,
				 "Profile size mismatch on KPU%i parsing.\n",
				 kpu + 1);
			return -EINVAL;
		}
		for (entry = 0; entry < entries; entry++) {
			profile->kpu[kpu].cam[entry] = cam[entry];
			profile->kpu[kpu].action[entry] = action[entry];
		}
	}

	return 0;
}

/* If the mapped image at 'prfl_addr' is the KPU profile named
 * 'kpu_profile', record it as the active firmware profile and return 0;
 * -EINVAL otherwise.
 */
static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
				 u64 prfl_sz, const char *kpu_profile)
{
	struct npc_kpu_profile_fwdata *kpu_data = NULL;
	int rc = -EINVAL;

	kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
	if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
	    !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kpu_data;
		rvu->kpu_fwdata_sz = prfl_sz;
		rvu->kpu_prfl_addr = prfl_addr;
		rc = 0;
	}

	return rc;
}

/* Search the mapped firmware-database image (either a single profile or a
 * coalesced set of profiles) for the one named 'kpu_profile' and load it.
 */
static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
					 const char *kpu_profile)
{
	struct npc_coalesced_kpu_prfl *img_data = NULL;
	int i = 0, rc = -EINVAL;
	void __iomem *kpu_prfl_addr;
	u16 offset;

	img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
	if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
	    !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
		/* Loaded profile is a single KPU profile. */
		rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
					   prfl_sz, kpu_profile);
		goto done;
	}

	/* Loaded profile is coalesced image, offset of first KPU profile.*/
	offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
		(img_data->num_prfl * sizeof(uint16_t));
	/* Check if mapped image is coalesced image. */
	while (i < img_data->num_prfl) {
		/* Profile image offsets are rounded up to next 8 multiple.*/
		offset = ALIGN_8B_CEIL(offset);
		kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
						 offset);
		rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
					   img_data->prfl_sz[i], kpu_profile);
		if (!rc)
			break;
		/* Calculating offset of profile image based on profile size.*/
		offset += img_data->prfl_sz[i];
		i++;
	}
done:
	return rc;
}

/* Map the firmware database and try to load 'kpu_profile' from it; on
 * failure unmap the image and clear all firmware-profile state.
 */
static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
{
	int ret = -EINVAL;
	u64 prfl_sz;

	/* Setting up the mapping for NPC profile image */
	ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
	if (ret < 0)
		goto done;

	/* Detect if profile is coalesced or single KPU profile and load */
	ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
	if (ret == 0)
		goto done;

	/* Cleaning up if KPU profile image from fwdata is not valid.
	 */
	if (rvu->kpu_prfl_addr) {
		iounmap(rvu->kpu_prfl_addr);
		rvu->kpu_prfl_addr = NULL;
		rvu->kpu_fwdata_sz = 0;
		rvu->kpu_fwdata = NULL;
	}

done:
	return ret;
}

/* Select the active KPU parser profile. Precedence: firmware binary from
 * the filesystem, then the firmware database, then the built-in default.
 */
static void npc_load_kpu_profile(struct rvu *rvu)
{
	struct npc_kpu_profile_adapter *profile = &rvu->kpu;
	const char *kpu_profile = rvu->kpu_pfl_name;
	const struct firmware *fw = NULL;
	bool retry_fwdb = false;

	/* If user not specified profile customization */
	if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
		goto revert_to_default;
	/* First prepare default KPU, then we'll customize top entries. */
	npc_prepare_default_kpu(profile);

	/* Order of preceedence for load loading NPC profile (high to low)
	 * Firmware binary in filesystem.
	 * Firmware database method.
	 * Default KPU profile.
	 */
	if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
		if (rvu->kpu_fwdata) {
			memcpy(rvu->kpu_fwdata, fw->data, fw->size);
			rvu->kpu_fwdata_sz = fw->size;
		}
		release_firmware(fw);
		retry_fwdb = true;
		goto program_kpu;
	}

load_image_fwdb:
	/* Loading the KPU profile using firmware database */
	if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
		goto revert_to_default;

program_kpu:
	/* Apply profile customization if firmware was loaded. */
	if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
		/* If image from firmware filesystem fails to load or invalid
		 * retry with firmware database method.
		 */
		if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
			/* Loading image from firmware database failed. */
			if (rvu->kpu_prfl_addr) {
				iounmap(rvu->kpu_prfl_addr);
				rvu->kpu_prfl_addr = NULL;
			} else {
				kfree(rvu->kpu_fwdata);
			}
			rvu->kpu_fwdata = NULL;
			rvu->kpu_fwdata_sz = 0;
			if (retry_fwdb) {
				retry_fwdb = false;
				goto load_image_fwdb;
			}
		}

		dev_warn(rvu->dev,
			 "Can't load KPU profile %s. Using default.\n",
			 kpu_profile);
		kfree(rvu->kpu_fwdata);
		rvu->kpu_fwdata = NULL;
		goto revert_to_default;
	}

	dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
		 profile->name, NPC_KPU_VER_MAJ(profile->version),
		 NPC_KPU_VER_MIN(profile->version),
		 NPC_KPU_VER_PATCH(profile->version));

	return;

revert_to_default:
	npc_prepare_default_kpu(profile);
}

/* Disable all KPUs, load the parser profile and then program the PKIND
 * (IKPU) and KPU CAM+action CSRs, clamped to HW-advertised limits.
 */
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int num_pkinds, num_kpus, idx;

	/* Disable all KPUs and their entries */
	for (idx = 0; idx < hw->npc_kpus; idx++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
	}

	/* Load and customize KPU profile. */
	npc_load_kpu_profile(rvu);

	/* First program IKPU profile i.e PKIND configs.
	 * Check HW max count to avoid configuring junk or
	 * writing to unsupported CSR addresses.
	 */
	num_pkinds = rvu->kpu.pkinds;
	num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);

	for (idx = 0; idx < num_pkinds; idx++)
		npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);

	/* Program KPU CAM and Action profiles */
	num_kpus = rvu->kpu.kpus;
	num_kpus = min_t(int, hw->npc_kpus, num_kpus);

	for (idx = 0; idx < num_kpus; idx++)
		npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}

/* Size the MCAM from the programmed key width, reserve per-NIXLF and
 * per-PF default entries, and allocate all bitmaps plus entry<->owner and
 * entry<->counter mapping arrays. Returns 0, -ENOMEM, or the error from
 * rvu_alloc_bitmap().
 */
static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
	int nixlf_count = rvu_get_nixlf_count(rvu);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int rsvd, err;
	u16 index;
	int cntr;
	u64 cfg;

	/* Actual number of MCAM entries vary by entry size */
	cfg = (rvu_read64(rvu, blkaddr,
			  NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
	mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
	mcam->keysize = cfg;

	/* Number of banks combined per MCAM entry */
	if (cfg == NPC_MCAM_KEY_X4)
		mcam->banks_per_entry = 4;
	else if (cfg == NPC_MCAM_KEY_X2)
		mcam->banks_per_entry = 2;
	else
		mcam->banks_per_entry = 1;

	/* Reserve one MCAM entry for each of the NIX LF to
	 * guarantee space to install default matching DMAC rule.
	 * Also reserve 2 MCAM entries for each PF for default
	 * channel based matching or 'bcast & promisc' matching to
	 * support BCAST and PROMISC modes of operation for PFs.
	 * PF0 is excluded.
	 */
	rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
		((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
	if (mcam->total_entries <= rsvd) {
		dev_warn(rvu->dev,
			 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
			 mcam->total_entries);
		return -ENOMEM;
	}

	/* Reserved entries live at the top: [bmap_entries..) are the
	 * per-NIXLF entries followed by the per-PF entries.
	 */
	mcam->bmap_entries = mcam->total_entries - rsvd;
	mcam->nixlf_offset = mcam->bmap_entries;
	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;

	/* Allocate bitmaps for managing MCAM entries */
	mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
				  sizeof(long), GFP_KERNEL);
	if (!mcam->bmap)
		return -ENOMEM;

	mcam->bmap_reverse = devm_kcalloc(rvu->dev,
					  BITS_TO_LONGS(mcam->bmap_entries),
					  sizeof(long), GFP_KERNEL);
	if (!mcam->bmap_reverse)
		return -ENOMEM;

	mcam->bmap_fcnt = mcam->bmap_entries;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
					    sizeof(u16), GFP_KERNEL);
	if (!mcam->entry2pfvf_map)
		return -ENOMEM;

	/* Reserve 1/8th of MCAM entries at the bottom for low priority
	 * allocations and another 1/8th at the top for high priority
	 * allocations.
	 */
	mcam->lprio_count = mcam->bmap_entries / 8;
	if (mcam->lprio_count > BITS_PER_LONG)
		mcam->lprio_count = round_down(mcam->lprio_count,
					       BITS_PER_LONG);
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
	mcam->hprio_count = mcam->lprio_count;
	mcam->hprio_end = mcam->hprio_count;

	/* Allocate bitmap for managing MCAM counters and memory
	 * for saving counter to RVU PFFUNC allocation mapping.
	 */
	err = rvu_alloc_bitmap(&mcam->counters);
	if (err)
		return err;

	mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
					   sizeof(u16), GFP_KERNEL);
	if (!mcam->cntr2pfvf_map)
		goto free_mem;

	/* Alloc memory for MCAM entry to counter mapping and for tracking
	 * counter's reference count.
	 */
	mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
					    sizeof(u16), GFP_KERNEL);
	if (!mcam->entry2cntr_map)
		goto free_mem;

	mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
					 sizeof(u16), GFP_KERNEL);
	if (!mcam->cntr_refcnt)
		goto free_mem;

	/* Alloc memory for saving target device of mcam rule */
	mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
						 sizeof(u16), GFP_KERNEL);
	if (!mcam->entry2target_pffunc)
		goto free_mem;

	for (index = 0; index < mcam->bmap_entries; index++) {
		mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
	}

	for (cntr = 0; cntr < mcam->counters.max; cntr++)
		mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;

	mutex_init(&mcam->lock);

	return 0;

free_mem:
	/* devm_kcalloc'd arrays are released with the device; only the
	 * counter bitmap needs explicit freeing here.
	 */
	kfree(mcam->counters.bmap);
	return -ENOMEM;
}

/* Read NPC_AF_CONST* CSRs and populate HW geometry: pkinds, KPUs and their
 * entry counts, interfaces, MCAM banks/bank size and stat counters,
 * including the extended-set (NPC_AF_CONST2) layout when present.
 */
static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 npc_const, npc_const1;
	u64 npc_const2 = 0;

	npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
	npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
	if (npc_const1 & BIT_ULL(63))
		npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);

	pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
	hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
	hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
	hw->npc_kpus =
(npc_const >> 8) & 0x1FULL;
	hw->npc_intfs = npc_const & 0xFULL;
	hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;

	mcam->banks = (npc_const >> 44) & 0xFULL;
	mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
	/* Extended set */
	if (npc_const2) {
		hw->npc_ext_set = true;
		hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
		mcam->banksize = npc_const2 & 0xFFFFULL;
	}

	mcam->counters.max = hw->npc_counters;
}

/* Program per-interface MCAM key configuration and miss actions:
 * drop-and-count on RX interfaces, default-unicast on TX interfaces.
 */
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nibble_ena, rx_kex, tx_kex;
	u8 intf;

	/* Reserve last counter for MCAM RX miss action which is set to
	 * drop packet. This way we will know how many pkts didn't match
	 * any MCAM entry.
	 */
	mcam->counters.max--;
	mcam->rx_miss_act_cntr = mcam->counters.max;

	rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
	tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
	nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);

	/* On affected silicon, force TX nibble config to match RX (see
	 * rvu_npc_get_tx_nibble_cfg()).
	 */
	nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
	if (nibble_ena) {
		tx_kex &= ~NPC_PARSE_NIBBLE;
		tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
		npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
	}

	/* Configure RX interfaces */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		if (is_npc_intf_tx(intf))
			continue;

		/* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
			    rx_kex);

		/* If MCAM lookup doesn't result in a match, drop the received
		 * packet. And map this action to a counter to count dropped
		 * packets.
		 */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);

		/* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
		 * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
		 */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_INTFX_MISS_STAT_ACT(intf),
			    ((mcam->rx_miss_act_cntr >> 9) << 12) |
			    BIT_ULL(9) | mcam->rx_miss_act_cntr);
	}

	/* Configure TX interfaces */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		if (is_npc_intf_rx(intf))
			continue;

		/* Extract Ltypes LID_LA to LID_LE */
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
			    tx_kex);

		/* Set TX miss action to UCAST_DEFAULT i.e
		 * transmit the packet on NIX LF SQ's default channel.
		 */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_INTFX_MISS_ACT(intf),
			    NIX_TX_ACTIONOP_UCAST_DEFAULT);
	}
}

/* Top-level NPC block init: discover HW geometry, quiesce the MCAM, load
 * parser (KPU) and key-extraction (MKEX) profiles, configure interfaces
 * and set up MCAM resource bookkeeping. Returns 0 or a negative errno.
 */
int rvu_npc_init(struct rvu *rvu)
{
	struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
	struct npc_pkind *pkind = &rvu->hw->pkind;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, entry, bank, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	rvu_npc_hw_init(rvu, blkaddr);

	/* First disable all MCAM entries, to stop traffic towards NIXLFs */
	for (bank = 0; bank < mcam->banks; bank++) {
		for (entry = 0; entry < mcam->banksize; entry++)
			rvu_write64(rvu, blkaddr,
				    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
	}

	err = rvu_alloc_bitmap(&pkind->rsrc);
	if (err)
		return err;
	/* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
	 * no need to configure PKIND for all LBKs separately.
	 */
	rvu_alloc_rsrc(&pkind->rsrc);

	/* Allocate mem for pkind to PF and channel mapping info */
	pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
					 sizeof(u32), GFP_KERNEL);
	if (!pkind->pfchan_map)
		return -ENOMEM;

	/* Configure KPU profile */
	npc_parser_profile_init(rvu, blkaddr);

	/* Config Outer L2, IPv4's NPC layer info */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
		    (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
		    kpu->lt_def->pck_ol2.ltype_mask);
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
		    (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
		    kpu->lt_def->pck_oip4.ltype_mask);

	/* Config Inner IPV4 NPC layer info */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
		    (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
		    kpu->lt_def->pck_iip4.ltype_mask);

	/* Enable below for Rx pkts.
	 * - Outer IPv4 header checksum validation.
	 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
	 * - Inner IPv4 header checksum validation.
	 * - Set non zero checksum error code value
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
		    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
		    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
		    BIT_ULL(2) | BIT_ULL(1));

	rvu_npc_setup_interfaces(rvu, blkaddr);

	/* Configure MKEX profile */
	npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);

	err = npc_mcam_rsrcs_init(rvu, blkaddr);
	if (err)
		return err;

	err = npc_flow_steering_init(rvu, blkaddr);
	if (err) {
		dev_err(rvu->dev,
			"Incorrect mkex profile loaded using default mkex\n");
		npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
	}

	return 0;
}

/* Release NPC memory: pkind/counter bitmaps and the firmware KPU image. */
void rvu_npc_freemem(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	struct npc_mcam *mcam = &rvu->hw->mcam;

	kfree(pkind->rsrc.bmap);
	kfree(mcam->counters.bmap);
	/* kpu_prfl_addr (ioremapped fwdb image) and kpu_fwdata (kzalloc'd
	 * filesystem image) are mutually exclusive owners of the profile.
	 */
	if (rvu->kpu_prfl_addr)
		iounmap(rvu->kpu_prfl_addr);
	else
		kfree(rvu->kpu_fwdata);
	mutex_destroy(&mcam->lock);
}

/* Count MCAM entries allocated to 'pcifunc' and how many are enabled. */
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
				       int blkaddr, int *alloc_cnt,
				       int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int entry;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (entry = 0; entry < mcam->bmap_entries; entry++) {
		if (mcam->entry2pfvf_map[entry] == pcifunc) {
			(*alloc_cnt)++;
			if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
				(*enable_cnt)++;
		}
	}
}

/* Count MCAM counters allocated to 'pcifunc' and how many are in use
 * (i.e. mapped to at least one entry).
 */
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
					 int blkaddr, int *alloc_cnt,
					 int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int cntr;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
			(*alloc_cnt)++;
			if (mcam->cntr_refcnt[cntr])
				(*enable_cnt)++;
		}
	}
}

/* Check that an MCAM entry index is valid and owned by 'pcifunc' (AF
 * bypasses the ownership check). Returns 0 or an NPC_MCAM_* error code.
 */
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
				 u16 pcifunc, int entry)
{
	/* verify AF installed entries */
	if (is_pffunc_af(pcifunc))
		return 0;
	/* Verify if entry is valid and if it is indeed
	 * allocated to the requesting PFFUNC.
	 */
	if (entry >= mcam->bmap_entries)
		return NPC_MCAM_INVALID_REQ;

	if (pcifunc != mcam->entry2pfvf_map[entry])
		return NPC_MCAM_PERM_DENIED;

	return 0;
}

/* Check that an MCAM counter index is valid and owned by 'pcifunc'. */
static int npc_mcam_verify_counter(struct npc_mcam *mcam,
				   u16 pcifunc, int cntr)
{
	/* Verify if counter is valid and if it is indeed
	 * allocated to the requesting PFFUNC.
	 */
	if (cntr >= mcam->counters.max)
		return NPC_MCAM_INVALID_REQ;

	if (pcifunc != mcam->cntr2pfvf_map[cntr])
		return NPC_MCAM_PERM_DENIED;

	return 0;
}

/* Map an MCAM entry to a stats counter and enable stat collection for it. */
static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
					int blkaddr, u16 entry, u16 cntr)
{
	u16 index = entry & (mcam->banksize - 1);
	u16 bank = npc_get_bank(mcam, entry);

	/* Set mapping and increment counter's refcnt */
	mcam->entry2cntr_map[entry] = cntr;
	mcam->cntr_refcnt[cntr]++;
	/* Enable stats
	 * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9]
	 * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0]
	 */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
		    ((cntr >> 9) << 12) | BIT_ULL(9) | cntr);
}

/* Inverse of npc_map_mcam_entry_and_cntr(): drop the mapping, decrement
 * the counter's refcount and disable stat collection for the entry.
 */
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
					  struct npc_mcam *mcam,
					  int blkaddr, u16 entry, u16 cntr)
{
	u16 index = entry & (mcam->banksize - 1);
	u16 bank = npc_get_bank(mcam, entry);

	/* Remove mapping and reduce counter's refcnt */
	mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
	mcam->cntr_refcnt[cntr]--;
	/* Disable stats */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index,
bank), 0x00); 2177 } 2178 2179 /* Sets MCAM entry in bitmap as used. Update 2180 * reverse bitmap too. Should be called with 2181 * 'mcam->lock' held. 2182 */ 2183 static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) 2184 { 2185 u16 entry, rentry; 2186 2187 entry = index; 2188 rentry = mcam->bmap_entries - index - 1; 2189 2190 __set_bit(entry, mcam->bmap); 2191 __set_bit(rentry, mcam->bmap_reverse); 2192 mcam->bmap_fcnt--; 2193 } 2194 2195 /* Sets MCAM entry in bitmap as free. Update 2196 * reverse bitmap too. Should be called with 2197 * 'mcam->lock' held. 2198 */ 2199 static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) 2200 { 2201 u16 entry, rentry; 2202 2203 entry = index; 2204 rentry = mcam->bmap_entries - index - 1; 2205 2206 __clear_bit(entry, mcam->bmap); 2207 __clear_bit(rentry, mcam->bmap_reverse); 2208 mcam->bmap_fcnt++; 2209 } 2210 2211 static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, 2212 int blkaddr, u16 pcifunc) 2213 { 2214 u16 index, cntr; 2215 2216 /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ 2217 for (index = 0; index < mcam->bmap_entries; index++) { 2218 if (mcam->entry2pfvf_map[index] == pcifunc) { 2219 mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; 2220 /* Free the entry in bitmap */ 2221 npc_mcam_clear_bit(mcam, index); 2222 /* Disable the entry */ 2223 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); 2224 2225 /* Update entry2counter mapping */ 2226 cntr = mcam->entry2cntr_map[index]; 2227 if (cntr != NPC_MCAM_INVALID_MAP) 2228 npc_unmap_mcam_entry_and_cntr(rvu, mcam, 2229 blkaddr, index, 2230 cntr); 2231 mcam->entry2target_pffunc[index] = 0x0; 2232 } 2233 } 2234 } 2235 2236 static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, 2237 u16 pcifunc) 2238 { 2239 u16 cntr; 2240 2241 /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ 2242 for (cntr = 0; cntr < mcam->counters.max; cntr++) { 2243 if (mcam->cntr2pfvf_map[cntr] 
== pcifunc) { 2244 mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; 2245 mcam->cntr_refcnt[cntr] = 0; 2246 rvu_free_rsrc(&mcam->counters, cntr); 2247 /* This API is expected to be called after freeing 2248 * MCAM entries, which inturn will remove 2249 * 'entry to counter' mapping. 2250 * No need to do it again. 2251 */ 2252 } 2253 } 2254 } 2255 2256 /* Find area of contiguous free entries of size 'nr'. 2257 * If not found return max contiguous free entries available. 2258 */ 2259 static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, 2260 u16 nr, u16 *max_area) 2261 { 2262 u16 max_area_start = 0; 2263 u16 index, next, end; 2264 2265 *max_area = 0; 2266 2267 again: 2268 index = find_next_zero_bit(map, size, start); 2269 if (index >= size) 2270 return max_area_start; 2271 2272 end = ((index + nr) >= size) ? size : index + nr; 2273 next = find_next_bit(map, end, index); 2274 if (*max_area < (next - index)) { 2275 *max_area = next - index; 2276 max_area_start = index; 2277 } 2278 2279 if (next < end) { 2280 start = next + 1; 2281 goto again; 2282 } 2283 2284 return max_area_start; 2285 } 2286 2287 /* Find number of free MCAM entries available 2288 * within range i.e in between 'start' and 'end'. 
 */
static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
{
	u16 index, next;
	u16 fcnt = 0;

again:
	if (start >= end)
		return fcnt;

	index = find_next_zero_bit(map, end, start);
	if (index >= end)
		return fcnt;

	next = find_next_bit(map, end, index);
	if (next <= end) {
		/* Add the zero run [index, next) and keep scanning */
		fcnt += next - index;
		start = next + 1;
		goto again;
	}

	fcnt += end - index;
	return fcnt;
}

/* Compute the bitmap search window [*start, *end) and search direction
 * for a priority (higher/lower than ref_entry) allocation request.
 */
static void
npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
				   struct npc_mcam_alloc_entry_req *req,
				   u16 *start, u16 *end, bool *reverse)
{
	u16 fcnt;

	if (req->priority == NPC_MCAM_HIGHER_PRIO)
		goto hprio;

	/* For a low priority entry allocation
	 * - If reference entry is not in hprio zone then
	 *      search range: ref_entry to end.
	 * - If reference entry is in hprio zone and if
	 *   request can be accomodated in non-hprio zone then
	 *      search range: 'start of middle zone' to 'end'
	 * - else search in reverse, so that less number of hprio
	 *   zone entries are allocated.
	 */

	*reverse = false;
	*start = req->ref_entry + 1;
	*end = mcam->bmap_entries;

	if (req->ref_entry >= mcam->hprio_end)
		return;

	fcnt = npc_mcam_get_free_count(mcam->bmap,
				       mcam->hprio_end, mcam->bmap_entries);
	if (fcnt > req->count)
		*start = mcam->hprio_end;
	else
		*reverse = true;
	return;

hprio:
	/* For a high priority entry allocation, search is always
	 * in reverse to preserve hprio zone entries.
	 * - If reference entry is not in lprio zone then
	 *      search range: 0 to ref_entry.
	 * - If reference entry is in lprio zone and if
	 *   request can be accomodated in middle zone then
	 *      search range: 'hprio_end' to 'lprio_start'
	 */

	*reverse = true;
	*start = 0;
	*end = req->ref_entry;

	if (req->ref_entry <= mcam->lprio_start)
		return;

	fcnt = npc_mcam_get_free_count(mcam->bmap,
				       mcam->hprio_end, mcam->lprio_start);
	if (fcnt < req->count)
		return;
	*start = mcam->hprio_end;
	*end = mcam->lprio_start;
}

/* Allocate 'req->count' MCAM entries (contiguous or not) for 'pcifunc',
 * honouring any priority placement relative to req->ref_entry.
 * Fills rsp with the allocated indices and remaining free count.
 */
static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
				  struct npc_mcam_alloc_entry_req *req,
				  struct npc_mcam_alloc_entry_rsp *rsp)
{
	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
	u16 fcnt, hp_fcnt, lp_fcnt;
	u16 start, end, index;
	int entry, next_start;
	bool reverse = false;
	unsigned long *bmap;
	u16 max_contig;

	mutex_lock(&mcam->lock);

	/* Check if there are any free entries */
	if (!mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return NPC_MCAM_ALLOC_FAILED;
	}

	/* MCAM entries are divided into high priority, middle and
	 * low priority zones. Idea is to not allocate top and lower
	 * most entries as much as possible, this is to increase
	 * probability of honouring priority allocation requests.
	 *
	 * Two bitmaps are used for mcam entry management,
	 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
	 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
	 *
	 * Reverse bitmap is used to allocate entries
	 * - when a higher priority entry is requested
	 * - when available free entries are less.
	 * Lower priority ones out of avaialble free entries are always
	 * chosen when 'high vs low' question arises.
	 */

	/* Get the search range for priority allocation request */
	if (req->priority) {
		npc_get_mcam_search_range_priority(mcam, req,
						   &start, &end, &reverse);
		goto alloc;
	}

	/* Find out the search range for non-priority allocation request
	 *
	 * Get MCAM free entry count in middle zone.
	 */
	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
					  mcam->lprio_start,
					  mcam->bmap_entries);
	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;

	/* Check if request can be accomodated in the middle zone */
	if (fcnt > req->count) {
		start = mcam->hprio_end;
		end = mcam->lprio_start;
	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
		/* Expand search zone from half of hprio zone to
		 * half of lprio zone.
		 */
		start = mcam->hprio_end / 2;
		end = mcam->bmap_entries - (mcam->lprio_count / 2);
		reverse = true;
	} else {
		/* Not enough free entries, search all entries in reverse,
		 * so that low priority ones will get used up.
		 */
		reverse = true;
		start = 0;
		end = mcam->bmap_entries;
	}

alloc:
	if (reverse) {
		/* Translate the window into reverse-bitmap coordinates:
		 * forward position 'p' maps to 'bmap_entries - p', and the
		 * bounds swap roles.
		 */
		bmap = mcam->bmap_reverse;
		start = mcam->bmap_entries - start;
		end = mcam->bmap_entries - end;
		index = start;
		start = end;
		end = index;
	} else {
		bmap = mcam->bmap;
	}

	if (req->contig) {
		/* Allocate requested number of contiguous entries, if
		 * unsuccessful find max contiguous entries available.
		 */
		index = npc_mcam_find_zero_area(bmap, end, start,
						req->count, &max_contig);
		rsp->count = max_contig;
		if (reverse)
			rsp->entry = mcam->bmap_entries - index - max_contig;
		else
			rsp->entry = index;
	} else {
		/* Allocate requested number of non-contiguous entries,
		 * if unsuccessful allocate as many as possible.
		 */
		rsp->count = 0;
		next_start = start;
		for (entry = 0; entry < req->count; entry++) {
			index = find_next_zero_bit(bmap, end, next_start);
			if (index >= end)
				break;

			next_start = start + (index - start) + 1;

			/* Save the entry's index */
			if (reverse)
				index = mcam->bmap_entries - index - 1;
			entry_list[entry] = index;
			rsp->count++;
		}
	}

	/* If allocating requested no of entries is unsucessful,
	 * expand the search range to full bitmap length and retry.
	 */
	if (!req->priority && (rsp->count < req->count) &&
	    ((end - start) != mcam->bmap_entries)) {
		reverse = true;
		start = 0;
		end = mcam->bmap_entries;
		goto alloc;
	}

	/* For priority entry allocation requests, if allocation is
	 * failed then expand search to max possible range and retry.
	 */
	if (req->priority && rsp->count < req->count) {
		if (req->priority == NPC_MCAM_LOWER_PRIO &&
		    (start != (req->ref_entry + 1))) {
			start = req->ref_entry + 1;
			end = mcam->bmap_entries;
			reverse = false;
			goto alloc;
		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
			   ((end - start) != req->ref_entry)) {
			start = 0;
			end = req->ref_entry;
			reverse = true;
			goto alloc;
		}
	}

	/* Copy MCAM entry indices into mbox response entry_list.
	 * Requester always expects indices in ascending order, so
	 * so reverse the list if reverse bitmap is used for allocation.
	 */
	if (!req->contig && rsp->count) {
		index = 0;
		for (entry = rsp->count - 1; entry >= 0; entry--) {
			if (reverse)
				rsp->entry_list[index++] = entry_list[entry];
			else
				rsp->entry_list[entry] = entry_list[entry];
		}
	}

	/* Mark the allocated entries as used and set nixlf mapping */
	for (entry = 0; entry < rsp->count; entry++) {
		index = req->contig ?
			(rsp->entry + entry) : rsp->entry_list[entry];
		npc_mcam_set_bit(mcam, index);
		mcam->entry2pfvf_map[index] = pcifunc;
		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
	}

	/* Update available free count in mbox response */
	rsp->free_count = mcam->bmap_fcnt;

	mutex_unlock(&mcam->lock);
	return 0;
}

/* Mbox handler: validate an MCAM entry allocation request and hand it
 * to npc_mcam_alloc_entries().
 */
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
					  struct npc_mcam_alloc_entry_req *req,
					  struct npc_mcam_alloc_entry_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	rsp->entry = NPC_MCAM_ENTRY_INVALID;
	rsp->free_count = 0;

	/* Check if ref_entry is within range */
	if (req->priority && req->ref_entry >= mcam->bmap_entries) {
		dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
			__func__, req->ref_entry);
		return NPC_MCAM_INVALID_REQ;
	}

	/* ref_entry can't be '0' if requested priority is high.
	 * Can't be last entry if requested priority is low.
	 */
	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
	     req->priority == NPC_MCAM_LOWER_PRIO))
		return NPC_MCAM_INVALID_REQ;

	/* Since list of allocated indices needs to be sent to requester,
	 * max number of non-contiguous entries per mbox msg is limited.
2583 */ 2584 if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) { 2585 dev_err(rvu->dev, 2586 "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n", 2587 __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES); 2588 return NPC_MCAM_INVALID_REQ; 2589 } 2590 2591 /* Alloc request from PFFUNC with no NIXLF attached should be denied */ 2592 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2593 return NPC_MCAM_ALLOC_DENIED; 2594 2595 return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); 2596 } 2597 2598 int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, 2599 struct npc_mcam_free_entry_req *req, 2600 struct msg_rsp *rsp) 2601 { 2602 struct npc_mcam *mcam = &rvu->hw->mcam; 2603 u16 pcifunc = req->hdr.pcifunc; 2604 int blkaddr, rc = 0; 2605 u16 cntr; 2606 2607 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2608 if (blkaddr < 0) 2609 return NPC_MCAM_INVALID_REQ; 2610 2611 /* Free request from PFFUNC with no NIXLF attached, ignore */ 2612 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2613 return NPC_MCAM_INVALID_REQ; 2614 2615 mutex_lock(&mcam->lock); 2616 2617 if (req->all) 2618 goto free_all; 2619 2620 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2621 if (rc) 2622 goto exit; 2623 2624 mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; 2625 mcam->entry2target_pffunc[req->entry] = 0x0; 2626 npc_mcam_clear_bit(mcam, req->entry); 2627 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2628 2629 /* Update entry2counter mapping */ 2630 cntr = mcam->entry2cntr_map[req->entry]; 2631 if (cntr != NPC_MCAM_INVALID_MAP) 2632 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2633 req->entry, cntr); 2634 2635 goto exit; 2636 2637 free_all: 2638 /* Free up all entries allocated to requesting PFFUNC */ 2639 npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); 2640 exit: 2641 mutex_unlock(&mcam->lock); 2642 return rc; 2643 } 2644 2645 int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu, 
2646 struct npc_mcam_read_entry_req *req, 2647 struct npc_mcam_read_entry_rsp *rsp) 2648 { 2649 struct npc_mcam *mcam = &rvu->hw->mcam; 2650 u16 pcifunc = req->hdr.pcifunc; 2651 int blkaddr, rc; 2652 2653 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2654 if (blkaddr < 0) 2655 return NPC_MCAM_INVALID_REQ; 2656 2657 mutex_lock(&mcam->lock); 2658 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2659 if (!rc) { 2660 npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry, 2661 &rsp->entry_data, 2662 &rsp->intf, &rsp->enable); 2663 } 2664 2665 mutex_unlock(&mcam->lock); 2666 return rc; 2667 } 2668 2669 int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, 2670 struct npc_mcam_write_entry_req *req, 2671 struct msg_rsp *rsp) 2672 { 2673 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 2674 struct npc_mcam *mcam = &rvu->hw->mcam; 2675 u16 pcifunc = req->hdr.pcifunc; 2676 u16 channel, chan_mask; 2677 int blkaddr, rc; 2678 u8 nix_intf; 2679 2680 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2681 if (blkaddr < 0) 2682 return NPC_MCAM_INVALID_REQ; 2683 2684 chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; 2685 channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; 2686 channel &= chan_mask; 2687 2688 mutex_lock(&mcam->lock); 2689 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2690 if (rc) 2691 goto exit; 2692 2693 if (req->set_cntr && 2694 npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { 2695 rc = NPC_MCAM_INVALID_REQ; 2696 goto exit; 2697 } 2698 2699 if (!is_npc_interface_valid(rvu, req->intf)) { 2700 rc = NPC_MCAM_INVALID_REQ; 2701 goto exit; 2702 } 2703 2704 if (is_npc_intf_tx(req->intf)) 2705 nix_intf = pfvf->nix_tx_intf; 2706 else 2707 nix_intf = pfvf->nix_rx_intf; 2708 2709 if (!is_pffunc_af(pcifunc) && 2710 npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { 2711 rc = NPC_MCAM_INVALID_REQ; 2712 goto exit; 2713 } 2714 2715 if (!is_pffunc_af(pcifunc) && 2716 npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 
pcifunc)) { 2717 rc = NPC_MCAM_INVALID_REQ; 2718 goto exit; 2719 } 2720 2721 /* For AF installed rules, the nix_intf should be set to target NIX */ 2722 if (is_pffunc_af(req->hdr.pcifunc)) 2723 nix_intf = req->intf; 2724 2725 npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, 2726 &req->entry_data, req->enable_entry); 2727 2728 if (req->set_cntr) 2729 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2730 req->entry, req->cntr); 2731 2732 rc = 0; 2733 exit: 2734 mutex_unlock(&mcam->lock); 2735 return rc; 2736 } 2737 2738 int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, 2739 struct npc_mcam_ena_dis_entry_req *req, 2740 struct msg_rsp *rsp) 2741 { 2742 struct npc_mcam *mcam = &rvu->hw->mcam; 2743 u16 pcifunc = req->hdr.pcifunc; 2744 int blkaddr, rc; 2745 2746 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2747 if (blkaddr < 0) 2748 return NPC_MCAM_INVALID_REQ; 2749 2750 mutex_lock(&mcam->lock); 2751 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2752 mutex_unlock(&mcam->lock); 2753 if (rc) 2754 return rc; 2755 2756 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); 2757 2758 return 0; 2759 } 2760 2761 int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, 2762 struct npc_mcam_ena_dis_entry_req *req, 2763 struct msg_rsp *rsp) 2764 { 2765 struct npc_mcam *mcam = &rvu->hw->mcam; 2766 u16 pcifunc = req->hdr.pcifunc; 2767 int blkaddr, rc; 2768 2769 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2770 if (blkaddr < 0) 2771 return NPC_MCAM_INVALID_REQ; 2772 2773 mutex_lock(&mcam->lock); 2774 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2775 mutex_unlock(&mcam->lock); 2776 if (rc) 2777 return rc; 2778 2779 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2780 2781 return 0; 2782 } 2783 2784 int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, 2785 struct npc_mcam_shift_entry_req *req, 2786 struct npc_mcam_shift_entry_rsp *rsp) 2787 { 2788 struct npc_mcam *mcam = &rvu->hw->mcam; 2789 u16 pcifunc = 
req->hdr.pcifunc; 2790 u16 old_entry, new_entry; 2791 u16 index, cntr; 2792 int blkaddr, rc; 2793 2794 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2795 if (blkaddr < 0) 2796 return NPC_MCAM_INVALID_REQ; 2797 2798 if (req->shift_count > NPC_MCAM_MAX_SHIFTS) 2799 return NPC_MCAM_INVALID_REQ; 2800 2801 mutex_lock(&mcam->lock); 2802 for (index = 0; index < req->shift_count; index++) { 2803 old_entry = req->curr_entry[index]; 2804 new_entry = req->new_entry[index]; 2805 2806 /* Check if both old and new entries are valid and 2807 * does belong to this PFFUNC or not. 2808 */ 2809 rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); 2810 if (rc) 2811 break; 2812 2813 rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); 2814 if (rc) 2815 break; 2816 2817 /* new_entry should not have a counter mapped */ 2818 if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { 2819 rc = NPC_MCAM_PERM_DENIED; 2820 break; 2821 } 2822 2823 /* Disable the new_entry */ 2824 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); 2825 2826 /* Copy rule from old entry to new entry */ 2827 npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); 2828 2829 /* Copy counter mapping, if any */ 2830 cntr = mcam->entry2cntr_map[old_entry]; 2831 if (cntr != NPC_MCAM_INVALID_MAP) { 2832 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2833 old_entry, cntr); 2834 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2835 new_entry, cntr); 2836 } 2837 2838 /* Enable new_entry and disable old_entry */ 2839 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); 2840 npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); 2841 } 2842 2843 /* If shift has failed then report the failed index */ 2844 if (index != req->shift_count) { 2845 rc = NPC_MCAM_PERM_DENIED; 2846 rsp->failed_entry_idx = index; 2847 } 2848 2849 mutex_unlock(&mcam->lock); 2850 return rc; 2851 } 2852 2853 int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, 2854 struct npc_mcam_alloc_counter_req *req, 
2855 struct npc_mcam_alloc_counter_rsp *rsp) 2856 { 2857 struct npc_mcam *mcam = &rvu->hw->mcam; 2858 u16 pcifunc = req->hdr.pcifunc; 2859 u16 max_contig, cntr; 2860 int blkaddr, index; 2861 2862 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2863 if (blkaddr < 0) 2864 return NPC_MCAM_INVALID_REQ; 2865 2866 /* If the request is from a PFFUNC with no NIXLF attached, ignore */ 2867 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2868 return NPC_MCAM_INVALID_REQ; 2869 2870 /* Since list of allocated counter IDs needs to be sent to requester, 2871 * max number of non-contiguous counters per mbox msg is limited. 2872 */ 2873 if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) 2874 return NPC_MCAM_INVALID_REQ; 2875 2876 mutex_lock(&mcam->lock); 2877 2878 /* Check if unused counters are available or not */ 2879 if (!rvu_rsrc_free_count(&mcam->counters)) { 2880 mutex_unlock(&mcam->lock); 2881 return NPC_MCAM_ALLOC_FAILED; 2882 } 2883 2884 rsp->count = 0; 2885 2886 if (req->contig) { 2887 /* Allocate requested number of contiguous counters, if 2888 * unsuccessful find max contiguous entries available. 2889 */ 2890 index = npc_mcam_find_zero_area(mcam->counters.bmap, 2891 mcam->counters.max, 0, 2892 req->count, &max_contig); 2893 rsp->count = max_contig; 2894 rsp->cntr = index; 2895 for (cntr = index; cntr < (index + max_contig); cntr++) { 2896 __set_bit(cntr, mcam->counters.bmap); 2897 mcam->cntr2pfvf_map[cntr] = pcifunc; 2898 } 2899 } else { 2900 /* Allocate requested number of non-contiguous counters, 2901 * if unsuccessful allocate as many as possible. 
2902 */ 2903 for (cntr = 0; cntr < req->count; cntr++) { 2904 index = rvu_alloc_rsrc(&mcam->counters); 2905 if (index < 0) 2906 break; 2907 rsp->cntr_list[cntr] = index; 2908 rsp->count++; 2909 mcam->cntr2pfvf_map[index] = pcifunc; 2910 } 2911 } 2912 2913 mutex_unlock(&mcam->lock); 2914 return 0; 2915 } 2916 2917 int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, 2918 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 2919 { 2920 struct npc_mcam *mcam = &rvu->hw->mcam; 2921 u16 index, entry = 0; 2922 int blkaddr, err; 2923 2924 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2925 if (blkaddr < 0) 2926 return NPC_MCAM_INVALID_REQ; 2927 2928 mutex_lock(&mcam->lock); 2929 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 2930 if (err) { 2931 mutex_unlock(&mcam->lock); 2932 return err; 2933 } 2934 2935 /* Mark counter as free/unused */ 2936 mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; 2937 rvu_free_rsrc(&mcam->counters, req->cntr); 2938 2939 /* Disable all MCAM entry's stats which are using this counter */ 2940 while (entry < mcam->bmap_entries) { 2941 if (!mcam->cntr_refcnt[req->cntr]) 2942 break; 2943 2944 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 2945 if (index >= mcam->bmap_entries) 2946 break; 2947 entry = index + 1; 2948 if (mcam->entry2cntr_map[index] != req->cntr) 2949 continue; 2950 2951 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2952 index, req->cntr); 2953 } 2954 2955 mutex_unlock(&mcam->lock); 2956 return 0; 2957 } 2958 2959 int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, 2960 struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) 2961 { 2962 struct npc_mcam *mcam = &rvu->hw->mcam; 2963 u16 index, entry = 0; 2964 int blkaddr, rc; 2965 2966 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2967 if (blkaddr < 0) 2968 return NPC_MCAM_INVALID_REQ; 2969 2970 mutex_lock(&mcam->lock); 2971 rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 2972 if (rc) 2973 goto 
exit; 2974 2975 /* Unmap the MCAM entry and counter */ 2976 if (!req->all) { 2977 rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); 2978 if (rc) 2979 goto exit; 2980 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2981 req->entry, req->cntr); 2982 goto exit; 2983 } 2984 2985 /* Disable all MCAM entry's stats which are using this counter */ 2986 while (entry < mcam->bmap_entries) { 2987 if (!mcam->cntr_refcnt[req->cntr]) 2988 break; 2989 2990 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 2991 if (index >= mcam->bmap_entries) 2992 break; 2993 if (mcam->entry2cntr_map[index] != req->cntr) 2994 continue; 2995 2996 entry = index + 1; 2997 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2998 index, req->cntr); 2999 } 3000 exit: 3001 mutex_unlock(&mcam->lock); 3002 return rc; 3003 } 3004 3005 int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, 3006 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 3007 { 3008 struct npc_mcam *mcam = &rvu->hw->mcam; 3009 int blkaddr, err; 3010 3011 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3012 if (blkaddr < 0) 3013 return NPC_MCAM_INVALID_REQ; 3014 3015 mutex_lock(&mcam->lock); 3016 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3017 mutex_unlock(&mcam->lock); 3018 if (err) 3019 return err; 3020 3021 rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); 3022 3023 return 0; 3024 } 3025 3026 int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, 3027 struct npc_mcam_oper_counter_req *req, 3028 struct npc_mcam_oper_counter_rsp *rsp) 3029 { 3030 struct npc_mcam *mcam = &rvu->hw->mcam; 3031 int blkaddr, err; 3032 3033 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3034 if (blkaddr < 0) 3035 return NPC_MCAM_INVALID_REQ; 3036 3037 mutex_lock(&mcam->lock); 3038 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3039 mutex_unlock(&mcam->lock); 3040 if (err) 3041 return err; 3042 3043 rsp->stat = rvu_read64(rvu, blkaddr, 
NPC_AF_MATCH_STATX(req->cntr)); 3044 rsp->stat &= BIT_ULL(48) - 1; 3045 3046 return 0; 3047 } 3048 3049 int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, 3050 struct npc_mcam_alloc_and_write_entry_req *req, 3051 struct npc_mcam_alloc_and_write_entry_rsp *rsp) 3052 { 3053 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 3054 struct npc_mcam_alloc_counter_req cntr_req; 3055 struct npc_mcam_alloc_counter_rsp cntr_rsp; 3056 struct npc_mcam_alloc_entry_req entry_req; 3057 struct npc_mcam_alloc_entry_rsp entry_rsp; 3058 struct npc_mcam *mcam = &rvu->hw->mcam; 3059 u16 entry = NPC_MCAM_ENTRY_INVALID; 3060 u16 cntr = NPC_MCAM_ENTRY_INVALID; 3061 u16 channel, chan_mask; 3062 int blkaddr, rc; 3063 u8 nix_intf; 3064 3065 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3066 if (blkaddr < 0) 3067 return NPC_MCAM_INVALID_REQ; 3068 3069 if (!is_npc_interface_valid(rvu, req->intf)) 3070 return NPC_MCAM_INVALID_REQ; 3071 3072 chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; 3073 channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; 3074 channel &= chan_mask; 3075 3076 if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) 3077 return NPC_MCAM_INVALID_REQ; 3078 3079 if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 3080 req->hdr.pcifunc)) 3081 return NPC_MCAM_INVALID_REQ; 3082 3083 /* Try to allocate a MCAM entry */ 3084 entry_req.hdr.pcifunc = req->hdr.pcifunc; 3085 entry_req.contig = true; 3086 entry_req.priority = req->priority; 3087 entry_req.ref_entry = req->ref_entry; 3088 entry_req.count = 1; 3089 3090 rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, 3091 &entry_req, &entry_rsp); 3092 if (rc) 3093 return rc; 3094 3095 if (!entry_rsp.count) 3096 return NPC_MCAM_ALLOC_FAILED; 3097 3098 entry = entry_rsp.entry; 3099 3100 if (!req->alloc_cntr) 3101 goto write_entry; 3102 3103 /* Now allocate counter */ 3104 cntr_req.hdr.pcifunc = req->hdr.pcifunc; 3105 cntr_req.contig = true; 3106 cntr_req.count = 1; 3107 3108 
rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 3109 if (rc) { 3110 /* Free allocated MCAM entry */ 3111 mutex_lock(&mcam->lock); 3112 mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; 3113 npc_mcam_clear_bit(mcam, entry); 3114 mutex_unlock(&mcam->lock); 3115 return rc; 3116 } 3117 3118 cntr = cntr_rsp.cntr; 3119 3120 write_entry: 3121 mutex_lock(&mcam->lock); 3122 3123 if (is_npc_intf_tx(req->intf)) 3124 nix_intf = pfvf->nix_tx_intf; 3125 else 3126 nix_intf = pfvf->nix_rx_intf; 3127 3128 npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf, 3129 &req->entry_data, req->enable_entry); 3130 3131 if (req->alloc_cntr) 3132 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); 3133 mutex_unlock(&mcam->lock); 3134 3135 rsp->entry = entry; 3136 rsp->cntr = cntr; 3137 3138 return 0; 3139 } 3140 3141 #define GET_KEX_CFG(intf) \ 3142 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) 3143 3144 #define GET_KEX_FLAGS(ld) \ 3145 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) 3146 3147 #define GET_KEX_LD(intf, lid, lt, ld) \ 3148 rvu_read64(rvu, BLKADDR_NPC, \ 3149 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) 3150 3151 #define GET_KEX_LDFLAGS(intf, ld, fl) \ 3152 rvu_read64(rvu, BLKADDR_NPC, \ 3153 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) 3154 3155 int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, 3156 struct npc_get_kex_cfg_rsp *rsp) 3157 { 3158 int lid, lt, ld, fl; 3159 3160 rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); 3161 rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); 3162 for (lid = 0; lid < NPC_MAX_LID; lid++) { 3163 for (lt = 0; lt < NPC_MAX_LT; lt++) { 3164 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3165 rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = 3166 GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); 3167 rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = 3168 GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); 3169 } 3170 } 3171 } 3172 for (ld = 0; ld < NPC_MAX_LD; ld++) 3173 rsp->kex_ld_flags[ld] = 
GET_KEX_FLAGS(ld); 3174 3175 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3176 for (fl = 0; fl < NPC_MAX_LFL; fl++) { 3177 rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = 3178 GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); 3179 rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = 3180 GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); 3181 } 3182 } 3183 memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); 3184 return 0; 3185 } 3186 3187 int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, 3188 struct msg_req *req, 3189 struct npc_mcam_read_base_rule_rsp *rsp) 3190 { 3191 struct npc_mcam *mcam = &rvu->hw->mcam; 3192 int index, blkaddr, nixlf, rc = 0; 3193 u16 pcifunc = req->hdr.pcifunc; 3194 struct rvu_pfvf *pfvf; 3195 u8 intf, enable; 3196 3197 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3198 if (blkaddr < 0) 3199 return NPC_MCAM_INVALID_REQ; 3200 3201 /* Return the channel number in case of PF */ 3202 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 3203 pfvf = rvu_get_pfvf(rvu, pcifunc); 3204 rsp->entry.kw[0] = pfvf->rx_chan_base; 3205 rsp->entry.kw_mask[0] = 0xFFFULL; 3206 goto out; 3207 } 3208 3209 /* Find the pkt steering rule installed by PF to this VF */ 3210 mutex_lock(&mcam->lock); 3211 for (index = 0; index < mcam->bmap_entries; index++) { 3212 if (mcam->entry2target_pffunc[index] == pcifunc) 3213 goto read_entry; 3214 } 3215 3216 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3217 if (rc < 0) { 3218 mutex_unlock(&mcam->lock); 3219 goto out; 3220 } 3221 /* Read the default ucast entry if there is no pkt steering rule */ 3222 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 3223 NIXLF_UCAST_ENTRY); 3224 read_entry: 3225 /* Read the mcam entry */ 3226 npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, 3227 &enable); 3228 mutex_unlock(&mcam->lock); 3229 out: 3230 return rc; 3231 } 3232 3233 int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, 3234 struct npc_mcam_get_stats_req *req, 3235 struct npc_mcam_get_stats_rsp *rsp) 3236 { 3237 struct npc_mcam *mcam = 
&rvu->hw->mcam; 3238 u16 index, cntr; 3239 int blkaddr; 3240 u64 regval; 3241 u32 bank; 3242 3243 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3244 if (blkaddr < 0) 3245 return NPC_MCAM_INVALID_REQ; 3246 3247 mutex_lock(&mcam->lock); 3248 3249 index = req->entry & (mcam->banksize - 1); 3250 bank = npc_get_bank(mcam, req->entry); 3251 3252 /* read MCAM entry STAT_ACT register */ 3253 regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); 3254 3255 if (!(regval & BIT_ULL(9))) { 3256 rsp->stat_ena = 0; 3257 mutex_unlock(&mcam->lock); 3258 return 0; 3259 } 3260 3261 cntr = regval & 0x1FF; 3262 3263 rsp->stat_ena = 1; 3264 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); 3265 rsp->stat &= BIT_ULL(48) - 1; 3266 3267 mutex_unlock(&mcam->lock); 3268 3269 return 0; 3270 } 3271