1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 15 #include "rvu_struct.h" 16 #include "rvu_reg.h" 17 #include "rvu.h" 18 #include "npc.h" 19 #include "cgx.h" 20 #include "npc_profile.h" 21 22 #define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */ 23 #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ 24 25 #define NPC_PARSE_RESULT_DMAC_OFFSET 8 26 #define NPC_HW_TSTAMP_OFFSET 8 27 #define NPC_KEX_CHAN_MASK 0xFFFULL 28 #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL 29 30 #define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8)) 31 32 static const char def_pfl_name[] = "default"; 33 34 static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, 35 int blkaddr, u16 pcifunc); 36 static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, 37 u16 pcifunc); 38 39 bool is_npc_intf_tx(u8 intf) 40 { 41 return !!(intf & 0x1); 42 } 43 44 bool is_npc_intf_rx(u8 intf) 45 { 46 return !(intf & 0x1); 47 } 48 49 bool is_npc_interface_valid(struct rvu *rvu, u8 intf) 50 { 51 struct rvu_hwinfo *hw = rvu->hw; 52 53 return intf < hw->npc_intfs; 54 } 55 56 int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) 57 { 58 /* Due to a HW issue in these silicon versions, parse nibble enable 59 * configuration has to be identical for both Rx and Tx interfaces. 
60 */ 61 if (is_rvu_96xx_B0(rvu)) 62 return nibble_ena; 63 return 0; 64 } 65 66 static int npc_mcam_verify_pf_func(struct rvu *rvu, 67 struct mcam_entry *entry_data, u8 intf, 68 u16 pcifunc) 69 { 70 u16 pf_func, pf_func_mask; 71 72 if (is_npc_intf_rx(intf)) 73 return 0; 74 75 pf_func_mask = (entry_data->kw_mask[0] >> 32) & 76 NPC_KEX_PF_FUNC_MASK; 77 pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; 78 79 pf_func = be16_to_cpu((__force __be16)pf_func); 80 if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || 81 ((pf_func & ~RVU_PFVF_FUNC_MASK) != 82 (pcifunc & ~RVU_PFVF_FUNC_MASK))) 83 return -EINVAL; 84 85 return 0; 86 } 87 88 int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) 89 { 90 int pf = rvu_get_pf(pcifunc); 91 u8 cgx_id, lmac_id; 92 int base = 0, end; 93 94 if (is_npc_intf_tx(intf)) 95 return 0; 96 97 /* return in case of AF installed rules */ 98 if (is_pffunc_af(pcifunc)) 99 return 0; 100 101 if (is_afvf(pcifunc)) { 102 end = rvu_get_num_lbk_chans(); 103 if (end < 0) 104 return -EINVAL; 105 } else { 106 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 107 base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); 108 /* CGX mapped functions has maximum of 16 channels */ 109 end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); 110 } 111 112 if (channel < base || channel > end) 113 return -EINVAL; 114 115 return 0; 116 } 117 118 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) 119 { 120 int blkaddr; 121 u64 val = 0; 122 123 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 124 if (blkaddr < 0) 125 return; 126 127 /* Config CPI base for the PKIND */ 128 val = pkind | 1ULL << 62; 129 rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val); 130 } 131 132 int rvu_npc_get_pkind(struct rvu *rvu, u16 pf) 133 { 134 struct npc_pkind *pkind = &rvu->hw->pkind; 135 u32 map; 136 int i; 137 138 for (i = 0; i < pkind->rsrc.max; i++) { 139 map = pkind->pfchan_map[i]; 140 if (((map >> 16) & 0x3F) == pf) 141 
return i; 142 } 143 return -1; 144 } 145 146 #define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20) 147 148 int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable) 149 { 150 int pkind, blkaddr; 151 u64 val; 152 153 pkind = rvu_npc_get_pkind(rvu, pf); 154 if (pkind < 0) { 155 dev_err(rvu->dev, "%s: pkind not mapped\n", __func__); 156 return -EINVAL; 157 } 158 159 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); 160 if (blkaddr < 0) { 161 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 162 return -EINVAL; 163 } 164 165 val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); 166 val &= ~NPC_AF_ACTION0_PTR_ADVANCE; 167 /* If timestamp is enabled then configure NPC to shift 8 bytes */ 168 if (enable) 169 val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE, 170 NPC_HW_TSTAMP_OFFSET); 171 rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); 172 173 return 0; 174 } 175 176 static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, 177 int nixlf) 178 { 179 struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); 180 struct rvu *rvu = hw->rvu; 181 int blkaddr = 0, max = 0; 182 struct rvu_block *block; 183 struct rvu_pfvf *pfvf; 184 185 pfvf = rvu_get_pfvf(rvu, pcifunc); 186 /* Given a PF/VF and NIX LF number calculate the unicast mcam 187 * entry index based on the NIX block assigned to the PF/VF. 
188 */ 189 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 190 while (blkaddr) { 191 if (pfvf->nix_blkaddr == blkaddr) 192 break; 193 block = &rvu->hw->block[blkaddr]; 194 max += block->lf.max; 195 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 196 } 197 198 return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF; 199 } 200 201 int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, 202 u16 pcifunc, int nixlf, int type) 203 { 204 int pf = rvu_get_pf(pcifunc); 205 int index; 206 207 /* Check if this is for a PF */ 208 if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) { 209 /* Reserved entries exclude PF0 */ 210 pf--; 211 index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF); 212 /* Broadcast address matching entry should be first so 213 * that the packet can be replicated to all VFs. 214 */ 215 if (type == NIXLF_BCAST_ENTRY) 216 return index; 217 else if (type == NIXLF_ALLMULTI_ENTRY) 218 return index + 1; 219 else if (type == NIXLF_PROMISC_ENTRY) 220 return index + 2; 221 } 222 223 return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf); 224 } 225 226 int npc_get_bank(struct npc_mcam *mcam, int index) 227 { 228 int bank = index / mcam->banksize; 229 230 /* 0,1 & 2,3 banks are combined for this keysize */ 231 if (mcam->keysize == NPC_MCAM_KEY_X2) 232 return bank ? 
2 : 0; 233 234 return bank; 235 } 236 237 bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, 238 int blkaddr, int index) 239 { 240 int bank = npc_get_bank(mcam, index); 241 u64 cfg; 242 243 index &= (mcam->banksize - 1); 244 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank)); 245 return (cfg & 1); 246 } 247 248 void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 249 int blkaddr, int index, bool enable) 250 { 251 int bank = npc_get_bank(mcam, index); 252 int actbank = bank; 253 254 index &= (mcam->banksize - 1); 255 for (; bank < (actbank + mcam->banks_per_entry); bank++) { 256 rvu_write64(rvu, blkaddr, 257 NPC_AF_MCAMEX_BANKX_CFG(index, bank), 258 enable ? 1 : 0); 259 } 260 } 261 262 static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 263 int blkaddr, int index) 264 { 265 int bank = npc_get_bank(mcam, index); 266 int actbank = bank; 267 268 index &= (mcam->banksize - 1); 269 for (; bank < (actbank + mcam->banks_per_entry); bank++) { 270 rvu_write64(rvu, blkaddr, 271 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0); 272 rvu_write64(rvu, blkaddr, 273 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0); 274 275 rvu_write64(rvu, blkaddr, 276 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0); 277 rvu_write64(rvu, blkaddr, 278 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0); 279 280 rvu_write64(rvu, blkaddr, 281 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0); 282 rvu_write64(rvu, blkaddr, 283 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0); 284 } 285 } 286 287 static void npc_get_keyword(struct mcam_entry *entry, int idx, 288 u64 *cam0, u64 *cam1) 289 { 290 u64 kw_mask = 0x00; 291 292 #define CAM_MASK(n) (BIT_ULL(n) - 1) 293 294 /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and 295 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1. 296 * 297 * Also, only 48 bits of BANKX_CAMX_W1 are valid. 
298 */ 299 switch (idx) { 300 case 0: 301 /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */ 302 *cam1 = entry->kw[0]; 303 kw_mask = entry->kw_mask[0]; 304 break; 305 case 1: 306 /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */ 307 *cam1 = entry->kw[1] & CAM_MASK(48); 308 kw_mask = entry->kw_mask[1] & CAM_MASK(48); 309 break; 310 case 2: 311 /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48> 312 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0> 313 */ 314 *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16); 315 *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16); 316 kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16); 317 kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16); 318 break; 319 case 3: 320 /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48> 321 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0> 322 */ 323 *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16); 324 *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16); 325 kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16); 326 kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16); 327 break; 328 case 4: 329 /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32> 330 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0> 331 */ 332 *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32); 333 *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32); 334 kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32); 335 kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32); 336 break; 337 case 5: 338 /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32> 339 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0> 340 */ 341 *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32); 342 *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32); 343 kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32); 344 kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32); 345 break; 346 case 6: 347 /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16> 348 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0> 349 */ 350 *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48); 351 *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48); 352 kw_mask = 
(entry->kw_mask[5] >> 16) & CAM_MASK(48); 353 kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48); 354 break; 355 case 7: 356 /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */ 357 *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48); 358 kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48); 359 break; 360 } 361 362 *cam1 &= kw_mask; 363 *cam0 = ~*cam1 & kw_mask; 364 } 365 366 static void npc_fill_entryword(struct mcam_entry *entry, int idx, 367 u64 cam0, u64 cam1) 368 { 369 /* Similar to npc_get_keyword, but fills mcam_entry structure from 370 * CAM registers. 371 */ 372 switch (idx) { 373 case 0: 374 entry->kw[0] = cam1; 375 entry->kw_mask[0] = cam1 ^ cam0; 376 break; 377 case 1: 378 entry->kw[1] = cam1; 379 entry->kw_mask[1] = cam1 ^ cam0; 380 break; 381 case 2: 382 entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48; 383 entry->kw[2] = (cam1 >> 16) & CAM_MASK(48); 384 entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; 385 entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48); 386 break; 387 case 3: 388 entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48; 389 entry->kw[3] = (cam1 >> 16) & CAM_MASK(32); 390 entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; 391 entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32); 392 break; 393 case 4: 394 entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32; 395 entry->kw[4] = (cam1 >> 32) & CAM_MASK(32); 396 entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; 397 entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32); 398 break; 399 case 5: 400 entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32; 401 entry->kw[5] = (cam1 >> 32) & CAM_MASK(16); 402 entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; 403 entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16); 404 break; 405 case 6: 406 entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16; 407 entry->kw[6] = (cam1 >> 48) & CAM_MASK(16); 408 entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; 409 entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16); 410 break; 411 case 7: 412 
entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16; 413 entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; 414 break; 415 } 416 } 417 418 static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, 419 int blkaddr, u16 pf_func) 420 { 421 int bank, nixlf, index; 422 423 /* get ucast entry rule entry index */ 424 nix_get_nixlf(rvu, pf_func, &nixlf, NULL); 425 index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf, 426 NIXLF_UCAST_ENTRY); 427 bank = npc_get_bank(mcam, index); 428 index &= (mcam->banksize - 1); 429 430 return rvu_read64(rvu, blkaddr, 431 NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); 432 } 433 434 static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, 435 int blkaddr, int index, struct mcam_entry *entry, 436 bool *enable) 437 { 438 u16 owner, target_func; 439 struct rvu_pfvf *pfvf; 440 u64 rx_action; 441 442 owner = mcam->entry2pfvf_map[index]; 443 target_func = (entry->action >> 4) & 0xffff; 444 /* do nothing when target is LBK/PF or owner is not PF */ 445 if (is_pffunc_af(owner) || is_afvf(target_func) || 446 (owner & RVU_PFVF_FUNC_MASK) || 447 !(target_func & RVU_PFVF_FUNC_MASK)) 448 return; 449 450 /* save entry2target_pffunc */ 451 pfvf = rvu_get_pfvf(rvu, target_func); 452 mcam->entry2target_pffunc[index] = target_func; 453 454 /* don't enable rule when nixlf not attached or initialized */ 455 if (!(is_nixlf_attached(rvu, target_func) && 456 test_bit(NIXLF_INITIALIZED, &pfvf->flags))) 457 *enable = false; 458 459 /* copy VF default entry action to the VF mcam entry */ 460 rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, 461 target_func); 462 if (rx_action) 463 entry->action = rx_action; 464 } 465 466 static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 467 int blkaddr, int index, u8 intf, 468 struct mcam_entry *entry, bool enable) 469 { 470 int bank = npc_get_bank(mcam, index); 471 int kw = 0, actbank, actindex; 472 u8 tx_intf_mask = ~intf & 0x3; 473 u8 tx_intf = intf; 474 u64 cam0, 
cam1; 475 476 actbank = bank; /* Save bank id, to set action later on */ 477 actindex = index; 478 index &= (mcam->banksize - 1); 479 480 /* Disable before mcam entry update */ 481 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); 482 483 /* Clear mcam entry to avoid writes being suppressed by NPC */ 484 npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex); 485 486 /* CAM1 takes the comparison value and 487 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. 488 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 489 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 490 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare. 491 */ 492 for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { 493 /* Interface should be set in all banks */ 494 if (is_npc_intf_tx(intf)) { 495 /* Last bit must be set and rest don't care 496 * for TX interfaces 497 */ 498 tx_intf_mask = 0x1; 499 tx_intf = intf & tx_intf_mask; 500 tx_intf_mask = ~tx_intf & tx_intf_mask; 501 } 502 503 rvu_write64(rvu, blkaddr, 504 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 505 tx_intf); 506 rvu_write64(rvu, blkaddr, 507 NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 508 tx_intf_mask); 509 510 /* Set the match key */ 511 npc_get_keyword(entry, kw, &cam0, &cam1); 512 rvu_write64(rvu, blkaddr, 513 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1); 514 rvu_write64(rvu, blkaddr, 515 NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0); 516 517 npc_get_keyword(entry, kw + 1, &cam0, &cam1); 518 rvu_write64(rvu, blkaddr, 519 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1); 520 rvu_write64(rvu, blkaddr, 521 NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); 522 } 523 524 /* PF installing VF rule */ 525 if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries) 526 npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable); 527 528 /* Set 'action' */ 529 rvu_write64(rvu, blkaddr, 530 NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); 531 532 /* Set TAG 
'action' */ 533 rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank), 534 entry->vtag_action); 535 536 /* Enable the entry */ 537 if (enable) 538 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); 539 } 540 541 void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 542 int blkaddr, u16 src, 543 struct mcam_entry *entry, u8 *intf, u8 *ena) 544 { 545 int sbank = npc_get_bank(mcam, src); 546 int bank, kw = 0; 547 u64 cam0, cam1; 548 549 src &= (mcam->banksize - 1); 550 bank = sbank; 551 552 for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) { 553 cam1 = rvu_read64(rvu, blkaddr, 554 NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1)); 555 cam0 = rvu_read64(rvu, blkaddr, 556 NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0)); 557 npc_fill_entryword(entry, kw, cam0, cam1); 558 559 cam1 = rvu_read64(rvu, blkaddr, 560 NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1)); 561 cam0 = rvu_read64(rvu, blkaddr, 562 NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0)); 563 npc_fill_entryword(entry, kw + 1, cam0, cam1); 564 } 565 566 entry->action = rvu_read64(rvu, blkaddr, 567 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); 568 entry->vtag_action = 569 rvu_read64(rvu, blkaddr, 570 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); 571 *intf = rvu_read64(rvu, blkaddr, 572 NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3; 573 *ena = rvu_read64(rvu, blkaddr, 574 NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1; 575 } 576 577 static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 578 int blkaddr, u16 src, u16 dest) 579 { 580 int dbank = npc_get_bank(mcam, dest); 581 int sbank = npc_get_bank(mcam, src); 582 u64 cfg, sreg, dreg; 583 int bank, i; 584 585 src &= (mcam->banksize - 1); 586 dest &= (mcam->banksize - 1); 587 588 /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ 589 for (bank = 0; bank < mcam->banks_per_entry; bank++) { 590 sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); 591 dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); 592 for (i 
= 0; i < 6; i++) { 593 cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); 594 rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); 595 } 596 } 597 598 /* Copy action */ 599 cfg = rvu_read64(rvu, blkaddr, 600 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); 601 rvu_write64(rvu, blkaddr, 602 NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); 603 604 /* Copy TAG action */ 605 cfg = rvu_read64(rvu, blkaddr, 606 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); 607 rvu_write64(rvu, blkaddr, 608 NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); 609 610 /* Enable or disable */ 611 cfg = rvu_read64(rvu, blkaddr, 612 NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); 613 rvu_write64(rvu, blkaddr, 614 NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); 615 } 616 617 static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, 618 int blkaddr, int index) 619 { 620 int bank = npc_get_bank(mcam, index); 621 622 index &= (mcam->banksize - 1); 623 return rvu_read64(rvu, blkaddr, 624 NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); 625 } 626 627 void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, 628 int nixlf, u64 chan, u8 *mac_addr) 629 { 630 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 631 struct npc_install_flow_req req = { 0 }; 632 struct npc_install_flow_rsp rsp = { 0 }; 633 struct npc_mcam *mcam = &rvu->hw->mcam; 634 struct nix_rx_action action; 635 int blkaddr, index; 636 637 /* AF's VFs work in promiscuous mode */ 638 if (is_afvf(pcifunc)) 639 return; 640 641 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 642 if (blkaddr < 0) 643 return; 644 645 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 646 nixlf, NIXLF_UCAST_ENTRY); 647 648 /* Don't change the action if entry is already enabled 649 * Otherwise RSS action may get overwritten. 
650 */ 651 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { 652 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 653 blkaddr, index); 654 } else { 655 *(u64 *)&action = 0x00; 656 action.op = NIX_RX_ACTIONOP_UCAST; 657 action.pf_func = pcifunc; 658 } 659 660 req.default_rule = 1; 661 ether_addr_copy(req.packet.dmac, mac_addr); 662 eth_broadcast_addr((u8 *)&req.mask.dmac); 663 req.features = BIT_ULL(NPC_DMAC); 664 req.channel = chan; 665 req.chan_mask = 0xFFFU; 666 req.intf = pfvf->nix_rx_intf; 667 req.op = action.op; 668 req.hdr.pcifunc = 0; /* AF is requester */ 669 req.vf = action.pf_func; 670 req.index = action.index; 671 req.match_id = action.match_id; 672 req.flow_key_alg = action.flow_key_alg; 673 674 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 675 } 676 677 void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, 678 int nixlf, u64 chan, u8 chan_cnt) 679 { 680 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 681 struct npc_install_flow_req req = { 0 }; 682 struct npc_install_flow_rsp rsp = { 0 }; 683 struct npc_mcam *mcam = &rvu->hw->mcam; 684 struct rvu_hwinfo *hw = rvu->hw; 685 int blkaddr, ucast_idx, index; 686 struct nix_rx_action action; 687 u64 relaxed_mask; 688 689 if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) 690 return; 691 692 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 693 if (blkaddr < 0) 694 return; 695 696 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 697 nixlf, NIXLF_PROMISC_ENTRY); 698 699 if (is_cgx_vf(rvu, pcifunc)) 700 index = npc_get_nixlf_mcam_index(mcam, 701 pcifunc & ~RVU_PFVF_FUNC_MASK, 702 nixlf, NIXLF_PROMISC_ENTRY); 703 704 /* If the corresponding PF's ucast action is RSS, 705 * use the same action for promisc also 706 */ 707 ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, 708 nixlf, NIXLF_UCAST_ENTRY); 709 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) 710 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 711 blkaddr, ucast_idx); 712 713 if (action.op != NIX_RX_ACTIONOP_RSS) 
{ 714 *(u64 *)&action = 0x00; 715 action.op = NIX_RX_ACTIONOP_UCAST; 716 } 717 718 /* RX_ACTION set to MCAST for CGX PF's */ 719 if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && 720 is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { 721 *(u64 *)&action = 0x00; 722 action.op = NIX_RX_ACTIONOP_MCAST; 723 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 724 action.index = pfvf->promisc_mce_idx; 725 } 726 727 req.chan_mask = 0xFFFU; 728 if (chan_cnt > 1) { 729 if (!is_power_of_2(chan_cnt)) { 730 dev_err(rvu->dev, 731 "%s: channel count more than 1, must be power of 2\n", __func__); 732 return; 733 } 734 relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1, 735 ilog2(chan_cnt)); 736 req.chan_mask &= relaxed_mask; 737 } 738 739 req.channel = chan; 740 req.intf = pfvf->nix_rx_intf; 741 req.entry = index; 742 req.op = action.op; 743 req.hdr.pcifunc = 0; /* AF is requester */ 744 req.vf = pcifunc; 745 req.index = action.index; 746 req.match_id = action.match_id; 747 req.flow_key_alg = action.flow_key_alg; 748 749 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 750 } 751 752 void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, 753 int nixlf, bool enable) 754 { 755 struct npc_mcam *mcam = &rvu->hw->mcam; 756 int blkaddr, index; 757 758 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 759 if (blkaddr < 0) 760 return; 761 762 /* Get 'pcifunc' of PF device */ 763 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 764 765 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 766 nixlf, NIXLF_PROMISC_ENTRY); 767 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 768 } 769 770 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, 771 int nixlf, u64 chan) 772 { 773 struct rvu_pfvf *pfvf; 774 struct npc_install_flow_req req = { 0 }; 775 struct npc_install_flow_rsp rsp = { 0 }; 776 struct npc_mcam *mcam = &rvu->hw->mcam; 777 struct rvu_hwinfo *hw = rvu->hw; 778 int blkaddr, index; 779 780 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 781 if (blkaddr < 0) 782 
return; 783 784 /* Skip LBK VFs */ 785 if (is_afvf(pcifunc)) 786 return; 787 788 /* If pkt replication is not supported, 789 * then only PF is allowed to add a bcast match entry. 790 */ 791 if (!hw->cap.nix_rx_multicast && is_vf(pcifunc)) 792 return; 793 794 /* Get 'pcifunc' of PF device */ 795 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 796 pfvf = rvu_get_pfvf(rvu, pcifunc); 797 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 798 nixlf, NIXLF_BCAST_ENTRY); 799 800 if (!hw->cap.nix_rx_multicast) { 801 /* Early silicon doesn't support pkt replication, 802 * so install entry with UCAST action, so that PF 803 * receives all broadcast packets. 804 */ 805 req.op = NIX_RX_ACTIONOP_UCAST; 806 } else { 807 req.op = NIX_RX_ACTIONOP_MCAST; 808 req.index = pfvf->bcast_mce_idx; 809 } 810 811 eth_broadcast_addr((u8 *)&req.packet.dmac); 812 eth_broadcast_addr((u8 *)&req.mask.dmac); 813 req.features = BIT_ULL(NPC_DMAC); 814 req.channel = chan; 815 req.chan_mask = 0xFFFU; 816 req.intf = pfvf->nix_rx_intf; 817 req.entry = index; 818 req.hdr.pcifunc = 0; /* AF is requester */ 819 req.vf = pcifunc; 820 821 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 822 } 823 824 void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 825 bool enable) 826 { 827 struct npc_mcam *mcam = &rvu->hw->mcam; 828 int blkaddr, index; 829 830 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 831 if (blkaddr < 0) 832 return; 833 834 /* Get 'pcifunc' of PF device */ 835 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 836 837 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 838 NIXLF_BCAST_ENTRY); 839 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 840 } 841 842 void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 843 u64 chan) 844 { 845 struct npc_install_flow_req req = { 0 }; 846 struct npc_install_flow_rsp rsp = { 0 }; 847 struct npc_mcam *mcam = &rvu->hw->mcam; 848 struct rvu_hwinfo *hw = rvu->hw; 849 int blkaddr, ucast_idx, index; 850 u8 mac_addr[ETH_ALEN] 
= { 0 }; 851 struct nix_rx_action action; 852 struct rvu_pfvf *pfvf; 853 u16 vf_func; 854 855 /* Only CGX PF/VF can add allmulticast entry */ 856 if (is_afvf(pcifunc)) 857 return; 858 859 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 860 if (blkaddr < 0) 861 return; 862 863 /* Get 'pcifunc' of PF device */ 864 vf_func = pcifunc & RVU_PFVF_FUNC_MASK; 865 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 866 pfvf = rvu_get_pfvf(rvu, pcifunc); 867 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 868 nixlf, NIXLF_ALLMULTI_ENTRY); 869 870 /* If the corresponding PF's ucast action is RSS, 871 * use the same action for multicast entry also 872 */ 873 ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, 874 nixlf, NIXLF_UCAST_ENTRY); 875 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) 876 *(u64 *)&action = npc_get_mcam_action(rvu, mcam, 877 blkaddr, ucast_idx); 878 879 if (action.op != NIX_RX_ACTIONOP_RSS) { 880 *(u64 *)&action = 0x00; 881 action.op = NIX_RX_ACTIONOP_UCAST; 882 action.pf_func = pcifunc; 883 } 884 885 /* RX_ACTION set to MCAST for CGX PF's */ 886 if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { 887 *(u64 *)&action = 0x00; 888 action.op = NIX_RX_ACTIONOP_MCAST; 889 action.index = pfvf->mcast_mce_idx; 890 } 891 892 mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ 893 ether_addr_copy(req.packet.dmac, mac_addr); 894 ether_addr_copy(req.mask.dmac, mac_addr); 895 req.features = BIT_ULL(NPC_DMAC); 896 897 /* For cn10k the upper two bits of the channel number are 898 * cpt channel number. with masking out these bits in the 899 * mcam entry, same entry used for NIX will allow packets 900 * received from cpt for parsing. 
901 */ 902 if (!is_rvu_otx2(rvu)) 903 req.chan_mask = NIX_CHAN_CPT_X2P_MASK; 904 else 905 req.chan_mask = 0xFFFU; 906 907 req.channel = chan; 908 req.intf = pfvf->nix_rx_intf; 909 req.entry = index; 910 req.op = action.op; 911 req.hdr.pcifunc = 0; /* AF is requester */ 912 req.vf = pcifunc | vf_func; 913 req.index = action.index; 914 req.match_id = action.match_id; 915 req.flow_key_alg = action.flow_key_alg; 916 917 rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); 918 } 919 920 void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 921 bool enable) 922 { 923 struct npc_mcam *mcam = &rvu->hw->mcam; 924 int blkaddr, index; 925 926 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 927 if (blkaddr < 0) 928 return; 929 930 /* Get 'pcifunc' of PF device */ 931 pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; 932 933 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 934 NIXLF_ALLMULTI_ENTRY); 935 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 936 } 937 938 static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, 939 int blkaddr, u16 pcifunc, u64 rx_action) 940 { 941 int actindex, index, bank; 942 bool enable; 943 944 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) 945 return; 946 947 mutex_lock(&mcam->lock); 948 for (index = 0; index < mcam->bmap_entries; index++) { 949 if (mcam->entry2target_pffunc[index] == pcifunc) { 950 bank = npc_get_bank(mcam, index); 951 actindex = index; 952 index &= (mcam->banksize - 1); 953 954 /* read vf flow entry enable status */ 955 enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, 956 actindex); 957 /* disable before mcam entry update */ 958 npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, 959 false); 960 /* update 'action' */ 961 rvu_write64(rvu, blkaddr, 962 NPC_AF_MCAMEX_BANKX_ACTION(index, bank), 963 rx_action); 964 if (enable) 965 npc_enable_mcam_entry(rvu, mcam, blkaddr, 966 actindex, true); 967 } 968 } 969 mutex_unlock(&mcam->lock); 970 } 971 972 void rvu_npc_update_flowkey_alg_idx(struct 
rvu *rvu, u16 pcifunc, int nixlf, 973 int group, int alg_idx, int mcam_index) 974 { 975 struct npc_mcam *mcam = &rvu->hw->mcam; 976 struct rvu_hwinfo *hw = rvu->hw; 977 struct nix_rx_action action; 978 int blkaddr, index, bank; 979 struct rvu_pfvf *pfvf; 980 981 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 982 if (blkaddr < 0) 983 return; 984 985 /* Check if this is for reserved default entry */ 986 if (mcam_index < 0) { 987 if (group != DEFAULT_RSS_CONTEXT_GROUP) 988 return; 989 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 990 nixlf, NIXLF_UCAST_ENTRY); 991 } else { 992 /* TODO: validate this mcam index */ 993 index = mcam_index; 994 } 995 996 if (index >= mcam->total_entries) 997 return; 998 999 bank = npc_get_bank(mcam, index); 1000 index &= (mcam->banksize - 1); 1001 1002 *(u64 *)&action = rvu_read64(rvu, blkaddr, 1003 NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); 1004 /* Ignore if no action was set earlier */ 1005 if (!*(u64 *)&action) 1006 return; 1007 1008 action.op = NIX_RX_ACTIONOP_RSS; 1009 action.pf_func = pcifunc; 1010 action.index = group; 1011 action.flow_key_alg = alg_idx; 1012 1013 rvu_write64(rvu, blkaddr, 1014 NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); 1015 1016 /* update the VF flow rule action with the VF default entry action */ 1017 if (mcam_index < 0) 1018 npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc, 1019 *(u64 *)&action); 1020 1021 /* update the action change in default rule */ 1022 pfvf = rvu_get_pfvf(rvu, pcifunc); 1023 if (pfvf->def_ucast_rule) 1024 pfvf->def_ucast_rule->rx_action = action; 1025 1026 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 1027 nixlf, NIXLF_PROMISC_ENTRY); 1028 1029 /* If PF's promiscuous entry is enabled, 1030 * Set RSS action for that entry as well 1031 */ 1032 if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && 1033 is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { 1034 bank = npc_get_bank(mcam, index); 1035 index &= (mcam->banksize - 1); 1036 1037 rvu_write64(rvu, blkaddr, 
1038 NPC_AF_MCAMEX_BANKX_ACTION(index, bank), 1039 *(u64 *)&action); 1040 } 1041 } 1042 1043 void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, 1044 int nixlf, int type, bool enable) 1045 { 1046 struct npc_mcam *mcam = &rvu->hw->mcam; 1047 struct rvu_hwinfo *hw = rvu->hw; 1048 struct nix_mce_list *mce_list; 1049 int index, blkaddr, mce_idx; 1050 struct rvu_pfvf *pfvf; 1051 1052 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1053 if (blkaddr < 0) 1054 return; 1055 1056 index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, 1057 nixlf, type); 1058 1059 /* disable MCAM entry when packet replication is not supported by hw */ 1060 if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) { 1061 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 1062 return; 1063 } 1064 1065 /* return incase mce list is not enabled */ 1066 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 1067 if (hw->cap.nix_rx_multicast && is_vf(pcifunc) && 1068 type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list) 1069 return; 1070 1071 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 1072 1073 nix_update_mce_list(rvu, pcifunc, mce_list, 1074 mce_idx, index, enable); 1075 if (enable) 1076 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 1077 } 1078 1079 static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, 1080 int nixlf, bool enable) 1081 { 1082 struct npc_mcam *mcam = &rvu->hw->mcam; 1083 int index, blkaddr; 1084 1085 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1086 if (blkaddr < 0) 1087 return; 1088 1089 /* Ucast MCAM match entry of this PF/VF */ 1090 index = npc_get_nixlf_mcam_index(mcam, pcifunc, 1091 nixlf, NIXLF_UCAST_ENTRY); 1092 npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); 1093 1094 /* Nothing to do for VFs, on platforms where pkt replication 1095 * is not supported 1096 */ 1097 if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast) 1098 return; 1099 1100 /* add/delete pf_func to broadcast MCE 
list */ 1101 npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, 1102 NIXLF_BCAST_ENTRY, enable); 1103 } 1104 1105 void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1106 { 1107 npc_enadis_default_entries(rvu, pcifunc, nixlf, false); 1108 1109 /* Delete multicast and promisc MCAM entries */ 1110 npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, 1111 NIXLF_ALLMULTI_ENTRY, false); 1112 npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, 1113 NIXLF_PROMISC_ENTRY, false); 1114 } 1115 1116 void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1117 { 1118 /* Enables only broadcast match entry. Promisc/Allmulti are enabled 1119 * in set_rx_mode mbox handler. 1120 */ 1121 npc_enadis_default_entries(rvu, pcifunc, nixlf, true); 1122 } 1123 1124 void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1125 { 1126 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 1127 struct npc_mcam *mcam = &rvu->hw->mcam; 1128 struct rvu_npc_mcam_rule *rule, *tmp; 1129 int blkaddr; 1130 1131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1132 if (blkaddr < 0) 1133 return; 1134 1135 mutex_lock(&mcam->lock); 1136 1137 /* Disable MCAM entries directing traffic to this 'pcifunc' */ 1138 list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { 1139 if (is_npc_intf_rx(rule->intf) && 1140 rule->rx_action.pf_func == pcifunc && 1141 rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) { 1142 npc_enable_mcam_entry(rvu, mcam, blkaddr, 1143 rule->entry, false); 1144 rule->enable = false; 1145 /* Indicate that default rule is disabled */ 1146 if (rule->default_rule) { 1147 pfvf->def_ucast_rule = NULL; 1148 list_del(&rule->list); 1149 kfree(rule); 1150 } 1151 } 1152 } 1153 1154 mutex_unlock(&mcam->lock); 1155 1156 npc_mcam_disable_flows(rvu, pcifunc); 1157 1158 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1159 } 1160 1161 void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1162 { 1163 struct npc_mcam *mcam = 
&rvu->hw->mcam; 1164 struct rvu_npc_mcam_rule *rule, *tmp; 1165 int blkaddr; 1166 1167 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1168 if (blkaddr < 0) 1169 return; 1170 1171 mutex_lock(&mcam->lock); 1172 1173 /* Free all MCAM entries owned by this 'pcifunc' */ 1174 npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); 1175 1176 /* Free all MCAM counters owned by this 'pcifunc' */ 1177 npc_mcam_free_all_counters(rvu, mcam, pcifunc); 1178 1179 /* Delete MCAM entries owned by this 'pcifunc' */ 1180 list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { 1181 if (rule->owner == pcifunc && !rule->default_rule) { 1182 list_del(&rule->list); 1183 kfree(rule); 1184 } 1185 } 1186 1187 mutex_unlock(&mcam->lock); 1188 1189 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1190 } 1191 1192 #define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ 1193 rvu_write64(rvu, blkaddr, \ 1194 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) 1195 1196 #define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ 1197 rvu_write64(rvu, blkaddr, \ 1198 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 1199 1200 static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr, 1201 struct npc_mcam_kex *mkex, u8 intf) 1202 { 1203 int lid, lt, ld, fl; 1204 1205 if (is_npc_intf_tx(intf)) 1206 return; 1207 1208 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 1209 mkex->keyx_cfg[NIX_INTF_RX]); 1210 1211 /* Program LDATA */ 1212 for (lid = 0; lid < NPC_MAX_LID; lid++) { 1213 for (lt = 0; lt < NPC_MAX_LT; lt++) { 1214 for (ld = 0; ld < NPC_MAX_LD; ld++) 1215 SET_KEX_LD(intf, lid, lt, ld, 1216 mkex->intf_lid_lt_ld[NIX_INTF_RX] 1217 [lid][lt][ld]); 1218 } 1219 } 1220 /* Program LFLAGS */ 1221 for (ld = 0; ld < NPC_MAX_LD; ld++) { 1222 for (fl = 0; fl < NPC_MAX_LFL; fl++) 1223 SET_KEX_LDFLAGS(intf, ld, fl, 1224 mkex->intf_ld_flags[NIX_INTF_RX] 1225 [ld][fl]); 1226 } 1227 } 1228 1229 static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr, 1230 struct npc_mcam_kex *mkex, u8 intf) 1231 { 
1232 int lid, lt, ld, fl; 1233 1234 if (is_npc_intf_rx(intf)) 1235 return; 1236 1237 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 1238 mkex->keyx_cfg[NIX_INTF_TX]); 1239 1240 /* Program LDATA */ 1241 for (lid = 0; lid < NPC_MAX_LID; lid++) { 1242 for (lt = 0; lt < NPC_MAX_LT; lt++) { 1243 for (ld = 0; ld < NPC_MAX_LD; ld++) 1244 SET_KEX_LD(intf, lid, lt, ld, 1245 mkex->intf_lid_lt_ld[NIX_INTF_TX] 1246 [lid][lt][ld]); 1247 } 1248 } 1249 /* Program LFLAGS */ 1250 for (ld = 0; ld < NPC_MAX_LD; ld++) { 1251 for (fl = 0; fl < NPC_MAX_LFL; fl++) 1252 SET_KEX_LDFLAGS(intf, ld, fl, 1253 mkex->intf_ld_flags[NIX_INTF_TX] 1254 [ld][fl]); 1255 } 1256 } 1257 1258 static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, 1259 struct npc_mcam_kex *mkex) 1260 { 1261 struct rvu_hwinfo *hw = rvu->hw; 1262 u8 intf; 1263 int ld; 1264 1265 for (ld = 0; ld < NPC_MAX_LD; ld++) 1266 rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), 1267 mkex->kex_ld_flags[ld]); 1268 1269 for (intf = 0; intf < hw->npc_intfs; intf++) { 1270 npc_program_mkex_rx(rvu, blkaddr, mkex, intf); 1271 npc_program_mkex_tx(rvu, blkaddr, mkex, intf); 1272 } 1273 } 1274 1275 static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr, 1276 u64 *size) 1277 { 1278 u64 prfl_addr, prfl_sz; 1279 1280 if (!rvu->fwdata) 1281 return -EINVAL; 1282 1283 prfl_addr = rvu->fwdata->mcam_addr; 1284 prfl_sz = rvu->fwdata->mcam_sz; 1285 1286 if (!prfl_addr || !prfl_sz) 1287 return -EINVAL; 1288 1289 *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz); 1290 if (!(*prfl_img_addr)) 1291 return -ENOMEM; 1292 1293 *size = prfl_sz; 1294 1295 return 0; 1296 } 1297 1298 /* strtoull of "mkexprof" with base:36 */ 1299 #define MKEX_END_SIGN 0xdeadbeef 1300 1301 static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr, 1302 const char *mkex_profile) 1303 { 1304 struct device *dev = &rvu->pdev->dev; 1305 struct npc_mcam_kex *mcam_kex; 1306 void __iomem *mkex_prfl_addr = NULL; 1307 u64 prfl_sz; 1308 int 
ret; 1309 1310 /* If user not selected mkex profile */ 1311 if (rvu->kpu_fwdata_sz || 1312 !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN)) 1313 goto program_mkex; 1314 1315 /* Setting up the mapping for mkex profile image */ 1316 ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz); 1317 if (ret < 0) 1318 goto program_mkex; 1319 1320 mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr; 1321 1322 while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { 1323 /* Compare with mkex mod_param name string */ 1324 if (mcam_kex->mkex_sign == MKEX_SIGN && 1325 !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { 1326 /* Due to an errata (35786) in A0/B0 pass silicon, 1327 * parse nibble enable configuration has to be 1328 * identical for both Rx and Tx interfaces. 1329 */ 1330 if (!is_rvu_96xx_B0(rvu) || 1331 mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX]) 1332 rvu->kpu.mkex = mcam_kex; 1333 goto program_mkex; 1334 } 1335 1336 mcam_kex++; 1337 prfl_sz -= sizeof(struct npc_mcam_kex); 1338 } 1339 dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile); 1340 1341 program_mkex: 1342 dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name); 1343 /* Program selected mkex profile */ 1344 npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex); 1345 if (mkex_prfl_addr) 1346 iounmap(mkex_prfl_addr); 1347 } 1348 1349 static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, 1350 const struct npc_kpu_profile_action *kpuaction, 1351 int kpu, int entry, bool pkind) 1352 { 1353 struct npc_kpu_action0 action0 = {0}; 1354 struct npc_kpu_action1 action1 = {0}; 1355 u64 reg; 1356 1357 action1.errlev = kpuaction->errlev; 1358 action1.errcode = kpuaction->errcode; 1359 action1.dp0_offset = kpuaction->dp0_offset; 1360 action1.dp1_offset = kpuaction->dp1_offset; 1361 action1.dp2_offset = kpuaction->dp2_offset; 1362 1363 if (pkind) 1364 reg = NPC_AF_PKINDX_ACTION1(entry); 1365 else 1366 reg = 
NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry); 1367 1368 rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1); 1369 1370 action0.byp_count = kpuaction->bypass_count; 1371 action0.capture_ena = kpuaction->cap_ena; 1372 action0.parse_done = kpuaction->parse_done; 1373 action0.next_state = kpuaction->next_state; 1374 action0.capture_lid = kpuaction->lid; 1375 action0.capture_ltype = kpuaction->ltype; 1376 action0.capture_flags = kpuaction->flags; 1377 action0.ptr_advance = kpuaction->ptr_advance; 1378 action0.var_len_offset = kpuaction->offset; 1379 action0.var_len_mask = kpuaction->mask; 1380 action0.var_len_right = kpuaction->right; 1381 action0.var_len_shift = kpuaction->shift; 1382 1383 if (pkind) 1384 reg = NPC_AF_PKINDX_ACTION0(entry); 1385 else 1386 reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry); 1387 1388 rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0); 1389 } 1390 1391 static void npc_config_kpucam(struct rvu *rvu, int blkaddr, 1392 const struct npc_kpu_profile_cam *kpucam, 1393 int kpu, int entry) 1394 { 1395 struct npc_kpu_cam cam0 = {0}; 1396 struct npc_kpu_cam cam1 = {0}; 1397 1398 cam1.state = kpucam->state & kpucam->state_mask; 1399 cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask; 1400 cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask; 1401 cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask; 1402 1403 cam0.state = ~kpucam->state & kpucam->state_mask; 1404 cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask; 1405 cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask; 1406 cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask; 1407 1408 rvu_write64(rvu, blkaddr, 1409 NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0); 1410 rvu_write64(rvu, blkaddr, 1411 NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1); 1412 } 1413 1414 static inline u64 enable_mask(int count) 1415 { 1416 return (((count) < 64) ? 
~(BIT_ULL(count) - 1) : (0x00ULL)); 1417 } 1418 1419 static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, 1420 const struct npc_kpu_profile *profile) 1421 { 1422 int entry, num_entries, max_entries; 1423 u64 entry_mask; 1424 1425 if (profile->cam_entries != profile->action_entries) { 1426 dev_err(rvu->dev, 1427 "KPU%d: CAM and action entries [%d != %d] not equal\n", 1428 kpu, profile->cam_entries, profile->action_entries); 1429 } 1430 1431 max_entries = rvu->hw->npc_kpu_entries; 1432 1433 /* Program CAM match entries for previous KPU extracted data */ 1434 num_entries = min_t(int, profile->cam_entries, max_entries); 1435 for (entry = 0; entry < num_entries; entry++) 1436 npc_config_kpucam(rvu, blkaddr, 1437 &profile->cam[entry], kpu, entry); 1438 1439 /* Program this KPU's actions */ 1440 num_entries = min_t(int, profile->action_entries, max_entries); 1441 for (entry = 0; entry < num_entries; entry++) 1442 npc_config_kpuaction(rvu, blkaddr, &profile->action[entry], 1443 kpu, entry, false); 1444 1445 /* Enable all programmed entries */ 1446 num_entries = min_t(int, profile->action_entries, profile->cam_entries); 1447 entry_mask = enable_mask(num_entries); 1448 /* Disable first KPU_MAX_CST_ENT entries for built-in profile */ 1449 if (!rvu->kpu.custom) 1450 entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0); 1451 rvu_write64(rvu, blkaddr, 1452 NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask); 1453 if (num_entries > 64) { 1454 rvu_write64(rvu, blkaddr, 1455 NPC_AF_KPUX_ENTRY_DISX(kpu, 1), 1456 enable_mask(num_entries - 64)); 1457 } 1458 1459 /* Enable this KPU */ 1460 rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01); 1461 } 1462 1463 static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile) 1464 { 1465 profile->custom = 0; 1466 profile->name = def_pfl_name; 1467 profile->version = NPC_KPU_PROFILE_VER; 1468 profile->ikpu = ikpu_action_entries; 1469 profile->pkinds = ARRAY_SIZE(ikpu_action_entries); 1470 profile->kpu = 
npc_kpu_profiles; 1471 profile->kpus = ARRAY_SIZE(npc_kpu_profiles); 1472 profile->lt_def = &npc_lt_defaults; 1473 profile->mkex = &npc_mkex_default; 1474 1475 return 0; 1476 } 1477 1478 static int npc_apply_custom_kpu(struct rvu *rvu, 1479 struct npc_kpu_profile_adapter *profile) 1480 { 1481 size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0; 1482 struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata; 1483 struct npc_kpu_profile_action *action; 1484 struct npc_kpu_profile_cam *cam; 1485 struct npc_kpu_fwdata *fw_kpu; 1486 int entries; 1487 u16 kpu, entry; 1488 1489 if (rvu->kpu_fwdata_sz < hdr_sz) { 1490 dev_warn(rvu->dev, "Invalid KPU profile size\n"); 1491 return -EINVAL; 1492 } 1493 if (le64_to_cpu(fw->signature) != KPU_SIGN) { 1494 dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n", 1495 fw->signature); 1496 return -EINVAL; 1497 } 1498 /* Verify if the using known profile structure */ 1499 if (NPC_KPU_VER_MAJ(profile->version) > 1500 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) { 1501 dev_warn(rvu->dev, "Not supported Major version: %d > %d\n", 1502 NPC_KPU_VER_MAJ(profile->version), 1503 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)); 1504 return -EINVAL; 1505 } 1506 /* Verify if profile is aligned with the required kernel changes */ 1507 if (NPC_KPU_VER_MIN(profile->version) < 1508 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) { 1509 dev_warn(rvu->dev, 1510 "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n", 1511 NPC_KPU_VER_MAJ(profile->version), 1512 NPC_KPU_VER_MIN(profile->version), 1513 NPC_KPU_VER_PATCH(profile->version), 1514 NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER), 1515 NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER), 1516 NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER)); 1517 return -EINVAL; 1518 } 1519 /* Verify if profile fits the HW */ 1520 if (fw->kpus > profile->kpus) { 1521 dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus, 1522 profile->kpus); 1523 return -EINVAL; 1524 } 1525 1526 profile->custom = 1; 1527 profile->name = fw->name; 1528 
profile->version = le64_to_cpu(fw->version); 1529 profile->mkex = &fw->mkex; 1530 profile->lt_def = &fw->lt_def; 1531 1532 for (kpu = 0; kpu < fw->kpus; kpu++) { 1533 fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset); 1534 if (fw_kpu->entries > KPU_MAX_CST_ENT) 1535 dev_warn(rvu->dev, 1536 "Too many custom entries on KPU%d: %d > %d\n", 1537 kpu, fw_kpu->entries, KPU_MAX_CST_ENT); 1538 entries = min(fw_kpu->entries, KPU_MAX_CST_ENT); 1539 cam = (struct npc_kpu_profile_cam *)fw_kpu->data; 1540 offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam); 1541 action = (struct npc_kpu_profile_action *)(fw->data + offset); 1542 offset += fw_kpu->entries * sizeof(*action); 1543 if (rvu->kpu_fwdata_sz < hdr_sz + offset) { 1544 dev_warn(rvu->dev, 1545 "Profile size mismatch on KPU%i parsing.\n", 1546 kpu + 1); 1547 return -EINVAL; 1548 } 1549 for (entry = 0; entry < entries; entry++) { 1550 profile->kpu[kpu].cam[entry] = cam[entry]; 1551 profile->kpu[kpu].action[entry] = action[entry]; 1552 } 1553 } 1554 1555 return 0; 1556 } 1557 1558 static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr, 1559 u64 prfl_sz, const char *kpu_profile) 1560 { 1561 struct npc_kpu_profile_fwdata *kpu_data = NULL; 1562 int rc = -EINVAL; 1563 1564 kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr; 1565 if (le64_to_cpu(kpu_data->signature) == KPU_SIGN && 1566 !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) { 1567 dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n", 1568 kpu_profile); 1569 rvu->kpu_fwdata = kpu_data; 1570 rvu->kpu_fwdata_sz = prfl_sz; 1571 rvu->kpu_prfl_addr = prfl_addr; 1572 rc = 0; 1573 } 1574 1575 return rc; 1576 } 1577 1578 static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz, 1579 const char *kpu_profile) 1580 { 1581 struct npc_coalesced_kpu_prfl *img_data = NULL; 1582 int i = 0, rc = -EINVAL; 1583 void __iomem *kpu_prfl_addr; 1584 u16 offset; 1585 1586 img_data = (struct npc_coalesced_kpu_prfl __force 
*)rvu->kpu_prfl_addr; 1587 if (le64_to_cpu(img_data->signature) == KPU_SIGN && 1588 !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) { 1589 /* Loaded profile is a single KPU profile. */ 1590 rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr, 1591 prfl_sz, kpu_profile); 1592 goto done; 1593 } 1594 1595 /* Loaded profile is coalesced image, offset of first KPU profile.*/ 1596 offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) + 1597 (img_data->num_prfl * sizeof(uint16_t)); 1598 /* Check if mapped image is coalesced image. */ 1599 while (i < img_data->num_prfl) { 1600 /* Profile image offsets are rounded up to next 8 multiple.*/ 1601 offset = ALIGN_8B_CEIL(offset); 1602 kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr + 1603 offset); 1604 rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr, 1605 img_data->prfl_sz[i], kpu_profile); 1606 if (!rc) 1607 break; 1608 /* Calculating offset of profile image based on profile size.*/ 1609 offset += img_data->prfl_sz[i]; 1610 i++; 1611 } 1612 done: 1613 return rc; 1614 } 1615 1616 static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile) 1617 { 1618 int ret = -EINVAL; 1619 u64 prfl_sz; 1620 1621 /* Setting up the mapping for NPC profile image */ 1622 ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz); 1623 if (ret < 0) 1624 goto done; 1625 1626 /* Detect if profile is coalesced or single KPU profile and load */ 1627 ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile); 1628 if (ret == 0) 1629 goto done; 1630 1631 /* Cleaning up if KPU profile image from fwdata is not valid. 
*/ 1632 if (rvu->kpu_prfl_addr) { 1633 iounmap(rvu->kpu_prfl_addr); 1634 rvu->kpu_prfl_addr = NULL; 1635 rvu->kpu_fwdata_sz = 0; 1636 rvu->kpu_fwdata = NULL; 1637 } 1638 1639 done: 1640 return ret; 1641 } 1642 1643 static void npc_load_kpu_profile(struct rvu *rvu) 1644 { 1645 struct npc_kpu_profile_adapter *profile = &rvu->kpu; 1646 const char *kpu_profile = rvu->kpu_pfl_name; 1647 const struct firmware *fw = NULL; 1648 bool retry_fwdb = false; 1649 1650 /* If user not specified profile customization */ 1651 if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN)) 1652 goto revert_to_default; 1653 /* First prepare default KPU, then we'll customize top entries. */ 1654 npc_prepare_default_kpu(profile); 1655 1656 /* Order of preceedence for load loading NPC profile (high to low) 1657 * Firmware binary in filesystem. 1658 * Firmware database method. 1659 * Default KPU profile. 1660 */ 1661 if (!request_firmware(&fw, kpu_profile, rvu->dev)) { 1662 dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n", 1663 kpu_profile); 1664 rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL); 1665 if (rvu->kpu_fwdata) { 1666 memcpy(rvu->kpu_fwdata, fw->data, fw->size); 1667 rvu->kpu_fwdata_sz = fw->size; 1668 } 1669 release_firmware(fw); 1670 retry_fwdb = true; 1671 goto program_kpu; 1672 } 1673 1674 load_image_fwdb: 1675 /* Loading the KPU profile using firmware database */ 1676 if (npc_load_kpu_profile_fwdb(rvu, kpu_profile)) 1677 goto revert_to_default; 1678 1679 program_kpu: 1680 /* Apply profile customization if firmware was loaded. */ 1681 if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) { 1682 /* If image from firmware filesystem fails to load or invalid 1683 * retry with firmware database method. 1684 */ 1685 if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) { 1686 /* Loading image from firmware database failed. 
*/ 1687 if (rvu->kpu_prfl_addr) { 1688 iounmap(rvu->kpu_prfl_addr); 1689 rvu->kpu_prfl_addr = NULL; 1690 } else { 1691 kfree(rvu->kpu_fwdata); 1692 } 1693 rvu->kpu_fwdata = NULL; 1694 rvu->kpu_fwdata_sz = 0; 1695 if (retry_fwdb) { 1696 retry_fwdb = false; 1697 goto load_image_fwdb; 1698 } 1699 } 1700 1701 dev_warn(rvu->dev, 1702 "Can't load KPU profile %s. Using default.\n", 1703 kpu_profile); 1704 kfree(rvu->kpu_fwdata); 1705 rvu->kpu_fwdata = NULL; 1706 goto revert_to_default; 1707 } 1708 1709 dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n", 1710 profile->name, NPC_KPU_VER_MAJ(profile->version), 1711 NPC_KPU_VER_MIN(profile->version), 1712 NPC_KPU_VER_PATCH(profile->version)); 1713 1714 return; 1715 1716 revert_to_default: 1717 npc_prepare_default_kpu(profile); 1718 } 1719 1720 static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) 1721 { 1722 struct rvu_hwinfo *hw = rvu->hw; 1723 int num_pkinds, num_kpus, idx; 1724 struct npc_pkind *pkind; 1725 1726 /* Disable all KPUs and their entries */ 1727 for (idx = 0; idx < hw->npc_kpus; idx++) { 1728 rvu_write64(rvu, blkaddr, 1729 NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL); 1730 rvu_write64(rvu, blkaddr, 1731 NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL); 1732 rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00); 1733 } 1734 1735 /* Load and customize KPU profile. */ 1736 npc_load_kpu_profile(rvu); 1737 1738 /* First program IKPU profile i.e PKIND configs. 1739 * Check HW max count to avoid configuring junk or 1740 * writing to unsupported CSR addresses. 
1741 */ 1742 pkind = &hw->pkind; 1743 num_pkinds = rvu->kpu.pkinds; 1744 num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds); 1745 1746 for (idx = 0; idx < num_pkinds; idx++) 1747 npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true); 1748 1749 /* Program KPU CAM and Action profiles */ 1750 num_kpus = rvu->kpu.kpus; 1751 num_kpus = min_t(int, hw->npc_kpus, num_kpus); 1752 1753 for (idx = 0; idx < num_kpus; idx++) 1754 npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]); 1755 } 1756 1757 static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) 1758 { 1759 int nixlf_count = rvu_get_nixlf_count(rvu); 1760 struct npc_mcam *mcam = &rvu->hw->mcam; 1761 int rsvd, err; 1762 u16 index; 1763 int cntr; 1764 u64 cfg; 1765 1766 /* Actual number of MCAM entries vary by entry size */ 1767 cfg = (rvu_read64(rvu, blkaddr, 1768 NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07; 1769 mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize; 1770 mcam->keysize = cfg; 1771 1772 /* Number of banks combined per MCAM entry */ 1773 if (cfg == NPC_MCAM_KEY_X4) 1774 mcam->banks_per_entry = 4; 1775 else if (cfg == NPC_MCAM_KEY_X2) 1776 mcam->banks_per_entry = 2; 1777 else 1778 mcam->banks_per_entry = 1; 1779 1780 /* Reserve one MCAM entry for each of the NIX LF to 1781 * guarantee space to install default matching DMAC rule. 1782 * Also reserve 2 MCAM entries for each PF for default 1783 * channel based matching or 'bcast & promisc' matching to 1784 * support BCAST and PROMISC modes of operation for PFs. 1785 * PF0 is excluded. 
1786 */ 1787 rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + 1788 ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); 1789 if (mcam->total_entries <= rsvd) { 1790 dev_warn(rvu->dev, 1791 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", 1792 mcam->total_entries); 1793 return -ENOMEM; 1794 } 1795 1796 mcam->bmap_entries = mcam->total_entries - rsvd; 1797 mcam->nixlf_offset = mcam->bmap_entries; 1798 mcam->pf_offset = mcam->nixlf_offset + nixlf_count; 1799 1800 /* Allocate bitmaps for managing MCAM entries */ 1801 mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), 1802 sizeof(long), GFP_KERNEL); 1803 if (!mcam->bmap) 1804 return -ENOMEM; 1805 1806 mcam->bmap_reverse = devm_kcalloc(rvu->dev, 1807 BITS_TO_LONGS(mcam->bmap_entries), 1808 sizeof(long), GFP_KERNEL); 1809 if (!mcam->bmap_reverse) 1810 return -ENOMEM; 1811 1812 mcam->bmap_fcnt = mcam->bmap_entries; 1813 1814 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 1815 mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, 1816 sizeof(u16), GFP_KERNEL); 1817 if (!mcam->entry2pfvf_map) 1818 return -ENOMEM; 1819 1820 /* Reserve 1/8th of MCAM entries at the bottom for low priority 1821 * allocations and another 1/8th at the top for high priority 1822 * allocations. 1823 */ 1824 mcam->lprio_count = mcam->bmap_entries / 8; 1825 if (mcam->lprio_count > BITS_PER_LONG) 1826 mcam->lprio_count = round_down(mcam->lprio_count, 1827 BITS_PER_LONG); 1828 mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; 1829 mcam->hprio_count = mcam->lprio_count; 1830 mcam->hprio_end = mcam->hprio_count; 1831 1832 1833 /* Allocate bitmap for managing MCAM counters and memory 1834 * for saving counter to RVU PFFUNC allocation mapping. 
1835 */ 1836 err = rvu_alloc_bitmap(&mcam->counters); 1837 if (err) 1838 return err; 1839 1840 mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, 1841 sizeof(u16), GFP_KERNEL); 1842 if (!mcam->cntr2pfvf_map) 1843 goto free_mem; 1844 1845 /* Alloc memory for MCAM entry to counter mapping and for tracking 1846 * counter's reference count. 1847 */ 1848 mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, 1849 sizeof(u16), GFP_KERNEL); 1850 if (!mcam->entry2cntr_map) 1851 goto free_mem; 1852 1853 mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, 1854 sizeof(u16), GFP_KERNEL); 1855 if (!mcam->cntr_refcnt) 1856 goto free_mem; 1857 1858 /* Alloc memory for saving target device of mcam rule */ 1859 mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries, 1860 sizeof(u16), GFP_KERNEL); 1861 if (!mcam->entry2target_pffunc) 1862 goto free_mem; 1863 1864 for (index = 0; index < mcam->bmap_entries; index++) { 1865 mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; 1866 mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; 1867 } 1868 1869 for (cntr = 0; cntr < mcam->counters.max; cntr++) 1870 mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; 1871 1872 mutex_init(&mcam->lock); 1873 1874 return 0; 1875 1876 free_mem: 1877 kfree(mcam->counters.bmap); 1878 return -ENOMEM; 1879 } 1880 1881 static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) 1882 { 1883 struct npc_pkind *pkind = &rvu->hw->pkind; 1884 struct npc_mcam *mcam = &rvu->hw->mcam; 1885 struct rvu_hwinfo *hw = rvu->hw; 1886 u64 npc_const, npc_const1; 1887 u64 npc_const2 = 0; 1888 1889 npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST); 1890 npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1); 1891 if (npc_const1 & BIT_ULL(63)) 1892 npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); 1893 1894 pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL; 1895 hw->npc_kpu_entries = npc_const1 & 0xFFFULL; 1896 hw->npc_kpus = (npc_const >> 8) & 0x1FULL; 1897 hw->npc_intfs = 
npc_const & 0xFULL; 1898 hw->npc_counters = (npc_const >> 48) & 0xFFFFULL; 1899 1900 mcam->banks = (npc_const >> 44) & 0xFULL; 1901 mcam->banksize = (npc_const >> 28) & 0xFFFFULL; 1902 /* Extended set */ 1903 if (npc_const2) { 1904 hw->npc_ext_set = true; 1905 hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; 1906 mcam->banksize = npc_const2 & 0xFFFFULL; 1907 } 1908 1909 mcam->counters.max = hw->npc_counters; 1910 } 1911 1912 static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) 1913 { 1914 struct npc_mcam *mcam = &rvu->hw->mcam; 1915 struct rvu_hwinfo *hw = rvu->hw; 1916 u64 nibble_ena, rx_kex, tx_kex; 1917 u8 intf; 1918 1919 /* Reserve last counter for MCAM RX miss action which is set to 1920 * drop packet. This way we will know how many pkts didn't match 1921 * any MCAM entry. 1922 */ 1923 mcam->counters.max--; 1924 mcam->rx_miss_act_cntr = mcam->counters.max; 1925 1926 rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX]; 1927 tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX]; 1928 nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); 1929 1930 nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena); 1931 if (nibble_ena) { 1932 tx_kex &= ~NPC_PARSE_NIBBLE; 1933 tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); 1934 npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex; 1935 } 1936 1937 /* Configure RX interfaces */ 1938 for (intf = 0; intf < hw->npc_intfs; intf++) { 1939 if (is_npc_intf_tx(intf)) 1940 continue; 1941 1942 /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */ 1943 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 1944 rx_kex); 1945 1946 /* If MCAM lookup doesn't result in a match, drop the received 1947 * packet. And map this action to a counter to count dropped 1948 * packets. 
1949 */ 1950 rvu_write64(rvu, blkaddr, 1951 NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP); 1952 1953 /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9] 1954 * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0] 1955 */ 1956 rvu_write64(rvu, blkaddr, 1957 NPC_AF_INTFX_MISS_STAT_ACT(intf), 1958 ((mcam->rx_miss_act_cntr >> 9) << 12) | 1959 BIT_ULL(9) | mcam->rx_miss_act_cntr); 1960 } 1961 1962 /* Configure TX interfaces */ 1963 for (intf = 0; intf < hw->npc_intfs; intf++) { 1964 if (is_npc_intf_rx(intf)) 1965 continue; 1966 1967 /* Extract Ltypes LID_LA to LID_LE */ 1968 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), 1969 tx_kex); 1970 1971 /* Set TX miss action to UCAST_DEFAULT i.e 1972 * transmit the packet on NIX LF SQ's default channel. 1973 */ 1974 rvu_write64(rvu, blkaddr, 1975 NPC_AF_INTFX_MISS_ACT(intf), 1976 NIX_TX_ACTIONOP_UCAST_DEFAULT); 1977 } 1978 } 1979 1980 int rvu_npc_init(struct rvu *rvu) 1981 { 1982 struct npc_kpu_profile_adapter *kpu = &rvu->kpu; 1983 struct npc_pkind *pkind = &rvu->hw->pkind; 1984 struct npc_mcam *mcam = &rvu->hw->mcam; 1985 int blkaddr, entry, bank, err; 1986 1987 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1988 if (blkaddr < 0) { 1989 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1990 return -ENODEV; 1991 } 1992 1993 rvu_npc_hw_init(rvu, blkaddr); 1994 1995 /* First disable all MCAM entries, to stop traffic towards NIXLFs */ 1996 for (bank = 0; bank < mcam->banks; bank++) { 1997 for (entry = 0; entry < mcam->banksize; entry++) 1998 rvu_write64(rvu, blkaddr, 1999 NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); 2000 } 2001 2002 err = rvu_alloc_bitmap(&pkind->rsrc); 2003 if (err) 2004 return err; 2005 2006 /* Allocate mem for pkind to PF and channel mapping info */ 2007 pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, 2008 sizeof(u32), GFP_KERNEL); 2009 if (!pkind->pfchan_map) 2010 return -ENOMEM; 2011 2012 /* Configure KPU profile */ 2013 npc_parser_profile_init(rvu, blkaddr); 2014 2015 /* Config 
Outer L2, IPv4's NPC layer info */ 2016 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2, 2017 (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) | 2018 kpu->lt_def->pck_ol2.ltype_mask); 2019 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, 2020 (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) | 2021 kpu->lt_def->pck_oip4.ltype_mask); 2022 2023 /* Config Inner IPV4 NPC layer info */ 2024 rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, 2025 (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) | 2026 kpu->lt_def->pck_iip4.ltype_mask); 2027 2028 /* Enable below for Rx pkts. 2029 * - Outer IPv4 header checksum validation. 2030 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M]. 2031 * - Inner IPv4 header checksum validation. 2032 * - Set non zero checksum error code value 2033 */ 2034 rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, 2035 rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | 2036 BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) | 2037 BIT_ULL(2) | BIT_ULL(1)); 2038 2039 rvu_npc_setup_interfaces(rvu, blkaddr); 2040 2041 /* Configure MKEX profile */ 2042 npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); 2043 2044 err = npc_mcam_rsrcs_init(rvu, blkaddr); 2045 if (err) 2046 return err; 2047 2048 err = npc_flow_steering_init(rvu, blkaddr); 2049 if (err) { 2050 dev_err(rvu->dev, 2051 "Incorrect mkex profile loaded using default mkex\n"); 2052 npc_load_mkex_profile(rvu, blkaddr, def_pfl_name); 2053 } 2054 2055 return 0; 2056 } 2057 2058 void rvu_npc_freemem(struct rvu *rvu) 2059 { 2060 struct npc_pkind *pkind = &rvu->hw->pkind; 2061 struct npc_mcam *mcam = &rvu->hw->mcam; 2062 2063 kfree(pkind->rsrc.bmap); 2064 kfree(mcam->counters.bmap); 2065 if (rvu->kpu_prfl_addr) 2066 iounmap(rvu->kpu_prfl_addr); 2067 else 2068 kfree(rvu->kpu_fwdata); 2069 mutex_destroy(&mcam->lock); 2070 } 2071 2072 void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, 2073 int blkaddr, int *alloc_cnt, 2074 int 
*enable_cnt) 2075 { 2076 struct npc_mcam *mcam = &rvu->hw->mcam; 2077 int entry; 2078 2079 *alloc_cnt = 0; 2080 *enable_cnt = 0; 2081 2082 for (entry = 0; entry < mcam->bmap_entries; entry++) { 2083 if (mcam->entry2pfvf_map[entry] == pcifunc) { 2084 (*alloc_cnt)++; 2085 if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry)) 2086 (*enable_cnt)++; 2087 } 2088 } 2089 } 2090 2091 void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, 2092 int blkaddr, int *alloc_cnt, 2093 int *enable_cnt) 2094 { 2095 struct npc_mcam *mcam = &rvu->hw->mcam; 2096 int cntr; 2097 2098 *alloc_cnt = 0; 2099 *enable_cnt = 0; 2100 2101 for (cntr = 0; cntr < mcam->counters.max; cntr++) { 2102 if (mcam->cntr2pfvf_map[cntr] == pcifunc) { 2103 (*alloc_cnt)++; 2104 if (mcam->cntr_refcnt[cntr]) 2105 (*enable_cnt)++; 2106 } 2107 } 2108 } 2109 2110 static int npc_mcam_verify_entry(struct npc_mcam *mcam, 2111 u16 pcifunc, int entry) 2112 { 2113 /* verify AF installed entries */ 2114 if (is_pffunc_af(pcifunc)) 2115 return 0; 2116 /* Verify if entry is valid and if it is indeed 2117 * allocated to the requesting PFFUNC. 2118 */ 2119 if (entry >= mcam->bmap_entries) 2120 return NPC_MCAM_INVALID_REQ; 2121 2122 if (pcifunc != mcam->entry2pfvf_map[entry]) 2123 return NPC_MCAM_PERM_DENIED; 2124 2125 return 0; 2126 } 2127 2128 static int npc_mcam_verify_counter(struct npc_mcam *mcam, 2129 u16 pcifunc, int cntr) 2130 { 2131 /* Verify if counter is valid and if it is indeed 2132 * allocated to the requesting PFFUNC. 
2133 */ 2134 if (cntr >= mcam->counters.max) 2135 return NPC_MCAM_INVALID_REQ; 2136 2137 if (pcifunc != mcam->cntr2pfvf_map[cntr]) 2138 return NPC_MCAM_PERM_DENIED; 2139 2140 return 0; 2141 } 2142 2143 static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, 2144 int blkaddr, u16 entry, u16 cntr) 2145 { 2146 u16 index = entry & (mcam->banksize - 1); 2147 u16 bank = npc_get_bank(mcam, entry); 2148 2149 /* Set mapping and increment counter's refcnt */ 2150 mcam->entry2cntr_map[entry] = cntr; 2151 mcam->cntr_refcnt[cntr]++; 2152 /* Enable stats 2153 * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9] 2154 * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0] 2155 */ 2156 rvu_write64(rvu, blkaddr, 2157 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 2158 ((cntr >> 9) << 12) | BIT_ULL(9) | cntr); 2159 } 2160 2161 static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, 2162 struct npc_mcam *mcam, 2163 int blkaddr, u16 entry, u16 cntr) 2164 { 2165 u16 index = entry & (mcam->banksize - 1); 2166 u16 bank = npc_get_bank(mcam, entry); 2167 2168 /* Remove mapping and reduce counter's refcnt */ 2169 mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; 2170 mcam->cntr_refcnt[cntr]--; 2171 /* Disable stats */ 2172 rvu_write64(rvu, blkaddr, 2173 NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); 2174 } 2175 2176 /* Sets MCAM entry in bitmap as used. Update 2177 * reverse bitmap too. Should be called with 2178 * 'mcam->lock' held. 2179 */ 2180 static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) 2181 { 2182 u16 entry, rentry; 2183 2184 entry = index; 2185 rentry = mcam->bmap_entries - index - 1; 2186 2187 __set_bit(entry, mcam->bmap); 2188 __set_bit(rentry, mcam->bmap_reverse); 2189 mcam->bmap_fcnt--; 2190 } 2191 2192 /* Sets MCAM entry in bitmap as free. Update 2193 * reverse bitmap too. Should be called with 2194 * 'mcam->lock' held. 
 */
static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
{
	u16 entry, rentry;

	entry = index;
	/* Reverse bitmap mirrors the forward one: bit i here corresponds
	 * to bit (bmap_entries - 1 - i) in mcam->bmap.
	 */
	rentry = mcam->bmap_entries - index - 1;

	__clear_bit(entry, mcam->bmap);
	__clear_bit(rentry, mcam->bmap_reverse);
	/* One more entry is free now */
	mcam->bmap_fcnt++;
}

/* Free every MCAM entry owned by 'pcifunc': clear ownership, free the
 * bitmap bit, disable the entry in HW and drop any entry->counter mapping.
 * Called with mcam->lock held (see the mcam_free_entry mbox handler).
 */
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc)
{
	u16 index, cntr;

	/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2pfvf_map[index] == pcifunc) {
			mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
			/* Free the entry in bitmap */
			npc_mcam_clear_bit(mcam, index);
			/* Disable the entry */
			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);

			/* Update entry2counter mapping */
			cntr = mcam->entry2cntr_map[index];
			if (cntr != NPC_MCAM_INVALID_MAP)
				npc_unmap_mcam_entry_and_cntr(rvu, mcam,
							      blkaddr, index,
							      cntr);
			/* Clear any PF->VF pkt steering target as well */
			mcam->entry2target_pffunc[index] = 0x0;
		}
	}
}

/* Free every MCAM counter owned by 'pcifunc'.
 * NOTE(review): presumably called with mcam->lock held like the entry
 * variant above — confirm at the call sites.
 */
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc)
{
	u16 cntr;

	/* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
			mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
			mcam->cntr_refcnt[cntr] = 0;
			rvu_free_rsrc(&mcam->counters, cntr);
			/* This API is expected to be called after freeing
			 * MCAM entries, which in turn will remove
			 * 'entry to counter' mapping.
			 * No need to do it again.
			 */
		}
	}
}

/* Find area of contiguous free entries of size 'nr'.
 * If not found return max contiguous free entries available.
 */
static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
				   u16 nr, u16 *max_area)
{
	u16 max_area_start = 0;
	u16 index, next, end;

	*max_area = 0;

again:
	/* Locate next free (zero) bit from 'start' */
	index = find_next_zero_bit(map, size, start);
	if (index >= size)
		return max_area_start;

	/* Window of interest is at most 'nr' bits; clamp at bitmap end */
	end = ((index + nr) >= size) ? size : index + nr;
	/* First used bit inside the window bounds this free run */
	next = find_next_bit(map, end, index);
	if (*max_area < (next - index)) {
		*max_area = next - index;
		max_area_start = index;
	}

	/* Run was cut short by a used bit within the window; keep scanning */
	if (next < end) {
		start = next + 1;
		goto again;
	}

	return max_area_start;
}

/* Find number of free MCAM entries available
 * within range i.e in between 'start' and 'end'.
 */
static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
{
	u16 index, next;
	u16 fcnt = 0;

again:
	if (start >= end)
		return fcnt;

	index = find_next_zero_bit(map, end, start);
	if (index >= end)
		return fcnt;

	/* NOTE(review): find_next_bit() returns 'end' when no set bit is
	 * found, so this condition appears to always hold and the tail
	 * below looks unreachable — confirm against the bitmap API.
	 */
	next = find_next_bit(map, end, index);
	if (next <= end) {
		fcnt += next - index;
		start = next + 1;
		goto again;
	}

	fcnt += end - index;
	return fcnt;
}

/* Compute the bitmap search window ('start'/'end') and scan direction
 * ('reverse') for a priority-relative MCAM allocation request.
 */
static void
npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
				   struct npc_mcam_alloc_entry_req *req,
				   u16 *start, u16 *end, bool *reverse)
{
	u16 fcnt;

	if (req->priority == NPC_MCAM_HIGHER_PRIO)
		goto hprio;

	/* For a low priority entry allocation
	 * - If reference entry is not in hprio zone then
	 *      search range: ref_entry to end.
	 * - If reference entry is in hprio zone and if
	 *      request can be accommodated in non-hprio zone then
	 *      search range: 'start of middle zone' to 'end'
	 * - else search in reverse, so that less number of hprio
	 *      zone entries are allocated.
2329 */ 2330 2331 *reverse = false; 2332 *start = req->ref_entry + 1; 2333 *end = mcam->bmap_entries; 2334 2335 if (req->ref_entry >= mcam->hprio_end) 2336 return; 2337 2338 fcnt = npc_mcam_get_free_count(mcam->bmap, 2339 mcam->hprio_end, mcam->bmap_entries); 2340 if (fcnt > req->count) 2341 *start = mcam->hprio_end; 2342 else 2343 *reverse = true; 2344 return; 2345 2346 hprio: 2347 /* For a high priority entry allocation, search is always 2348 * in reverse to preserve hprio zone entries. 2349 * - If reference entry is not in lprio zone then 2350 * search range: 0 to ref_entry. 2351 * - If reference entry is in lprio zone and if 2352 * request can be accomodated in middle zone then 2353 * search range: 'hprio_end' to 'lprio_start' 2354 */ 2355 2356 *reverse = true; 2357 *start = 0; 2358 *end = req->ref_entry; 2359 2360 if (req->ref_entry <= mcam->lprio_start) 2361 return; 2362 2363 fcnt = npc_mcam_get_free_count(mcam->bmap, 2364 mcam->hprio_end, mcam->lprio_start); 2365 if (fcnt < req->count) 2366 return; 2367 *start = mcam->hprio_end; 2368 *end = mcam->lprio_start; 2369 } 2370 2371 static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, 2372 struct npc_mcam_alloc_entry_req *req, 2373 struct npc_mcam_alloc_entry_rsp *rsp) 2374 { 2375 u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; 2376 u16 fcnt, hp_fcnt, lp_fcnt; 2377 u16 start, end, index; 2378 int entry, next_start; 2379 bool reverse = false; 2380 unsigned long *bmap; 2381 u16 max_contig; 2382 2383 mutex_lock(&mcam->lock); 2384 2385 /* Check if there are any free entries */ 2386 if (!mcam->bmap_fcnt) { 2387 mutex_unlock(&mcam->lock); 2388 return NPC_MCAM_ALLOC_FAILED; 2389 } 2390 2391 /* MCAM entries are divided into high priority, middle and 2392 * low priority zones. Idea is to not allocate top and lower 2393 * most entries as much as possible, this is to increase 2394 * probability of honouring priority allocation requests. 
2395 * 2396 * Two bitmaps are used for mcam entry management, 2397 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'. 2398 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'. 2399 * 2400 * Reverse bitmap is used to allocate entries 2401 * - when a higher priority entry is requested 2402 * - when available free entries are less. 2403 * Lower priority ones out of avaialble free entries are always 2404 * chosen when 'high vs low' question arises. 2405 */ 2406 2407 /* Get the search range for priority allocation request */ 2408 if (req->priority) { 2409 npc_get_mcam_search_range_priority(mcam, req, 2410 &start, &end, &reverse); 2411 goto alloc; 2412 } 2413 2414 /* Find out the search range for non-priority allocation request 2415 * 2416 * Get MCAM free entry count in middle zone. 2417 */ 2418 lp_fcnt = npc_mcam_get_free_count(mcam->bmap, 2419 mcam->lprio_start, 2420 mcam->bmap_entries); 2421 hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end); 2422 fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt; 2423 2424 /* Check if request can be accomodated in the middle zone */ 2425 if (fcnt > req->count) { 2426 start = mcam->hprio_end; 2427 end = mcam->lprio_start; 2428 } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) { 2429 /* Expand search zone from half of hprio zone to 2430 * half of lprio zone. 2431 */ 2432 start = mcam->hprio_end / 2; 2433 end = mcam->bmap_entries - (mcam->lprio_count / 2); 2434 reverse = true; 2435 } else { 2436 /* Not enough free entries, search all entries in reverse, 2437 * so that low priority ones will get used up. 
2438 */ 2439 reverse = true; 2440 start = 0; 2441 end = mcam->bmap_entries; 2442 } 2443 2444 alloc: 2445 if (reverse) { 2446 bmap = mcam->bmap_reverse; 2447 start = mcam->bmap_entries - start; 2448 end = mcam->bmap_entries - end; 2449 index = start; 2450 start = end; 2451 end = index; 2452 } else { 2453 bmap = mcam->bmap; 2454 } 2455 2456 if (req->contig) { 2457 /* Allocate requested number of contiguous entries, if 2458 * unsuccessful find max contiguous entries available. 2459 */ 2460 index = npc_mcam_find_zero_area(bmap, end, start, 2461 req->count, &max_contig); 2462 rsp->count = max_contig; 2463 if (reverse) 2464 rsp->entry = mcam->bmap_entries - index - max_contig; 2465 else 2466 rsp->entry = index; 2467 } else { 2468 /* Allocate requested number of non-contiguous entries, 2469 * if unsuccessful allocate as many as possible. 2470 */ 2471 rsp->count = 0; 2472 next_start = start; 2473 for (entry = 0; entry < req->count; entry++) { 2474 index = find_next_zero_bit(bmap, end, next_start); 2475 if (index >= end) 2476 break; 2477 2478 next_start = start + (index - start) + 1; 2479 2480 /* Save the entry's index */ 2481 if (reverse) 2482 index = mcam->bmap_entries - index - 1; 2483 entry_list[entry] = index; 2484 rsp->count++; 2485 } 2486 } 2487 2488 /* If allocating requested no of entries is unsucessful, 2489 * expand the search range to full bitmap length and retry. 2490 */ 2491 if (!req->priority && (rsp->count < req->count) && 2492 ((end - start) != mcam->bmap_entries)) { 2493 reverse = true; 2494 start = 0; 2495 end = mcam->bmap_entries; 2496 goto alloc; 2497 } 2498 2499 /* For priority entry allocation requests, if allocation is 2500 * failed then expand search to max possible range and retry. 
2501 */ 2502 if (req->priority && rsp->count < req->count) { 2503 if (req->priority == NPC_MCAM_LOWER_PRIO && 2504 (start != (req->ref_entry + 1))) { 2505 start = req->ref_entry + 1; 2506 end = mcam->bmap_entries; 2507 reverse = false; 2508 goto alloc; 2509 } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) && 2510 ((end - start) != req->ref_entry)) { 2511 start = 0; 2512 end = req->ref_entry; 2513 reverse = true; 2514 goto alloc; 2515 } 2516 } 2517 2518 /* Copy MCAM entry indices into mbox response entry_list. 2519 * Requester always expects indices in ascending order, so 2520 * so reverse the list if reverse bitmap is used for allocation. 2521 */ 2522 if (!req->contig && rsp->count) { 2523 index = 0; 2524 for (entry = rsp->count - 1; entry >= 0; entry--) { 2525 if (reverse) 2526 rsp->entry_list[index++] = entry_list[entry]; 2527 else 2528 rsp->entry_list[entry] = entry_list[entry]; 2529 } 2530 } 2531 2532 /* Mark the allocated entries as used and set nixlf mapping */ 2533 for (entry = 0; entry < rsp->count; entry++) { 2534 index = req->contig ? 
2535 (rsp->entry + entry) : rsp->entry_list[entry]; 2536 npc_mcam_set_bit(mcam, index); 2537 mcam->entry2pfvf_map[index] = pcifunc; 2538 mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; 2539 } 2540 2541 /* Update available free count in mbox response */ 2542 rsp->free_count = mcam->bmap_fcnt; 2543 2544 mutex_unlock(&mcam->lock); 2545 return 0; 2546 } 2547 2548 int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, 2549 struct npc_mcam_alloc_entry_req *req, 2550 struct npc_mcam_alloc_entry_rsp *rsp) 2551 { 2552 struct npc_mcam *mcam = &rvu->hw->mcam; 2553 u16 pcifunc = req->hdr.pcifunc; 2554 int blkaddr; 2555 2556 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2557 if (blkaddr < 0) 2558 return NPC_MCAM_INVALID_REQ; 2559 2560 rsp->entry = NPC_MCAM_ENTRY_INVALID; 2561 rsp->free_count = 0; 2562 2563 /* Check if ref_entry is within range */ 2564 if (req->priority && req->ref_entry >= mcam->bmap_entries) { 2565 dev_err(rvu->dev, "%s: reference entry %d is out of range\n", 2566 __func__, req->ref_entry); 2567 return NPC_MCAM_INVALID_REQ; 2568 } 2569 2570 /* ref_entry can't be '0' if requested priority is high. 2571 * Can't be last entry if requested priority is low. 2572 */ 2573 if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || 2574 ((req->ref_entry == (mcam->bmap_entries - 1)) && 2575 req->priority == NPC_MCAM_LOWER_PRIO)) 2576 return NPC_MCAM_INVALID_REQ; 2577 2578 /* Since list of allocated indices needs to be sent to requester, 2579 * max number of non-contiguous entries per mbox msg is limited. 
2580 */ 2581 if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) { 2582 dev_err(rvu->dev, 2583 "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n", 2584 __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES); 2585 return NPC_MCAM_INVALID_REQ; 2586 } 2587 2588 /* Alloc request from PFFUNC with no NIXLF attached should be denied */ 2589 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2590 return NPC_MCAM_ALLOC_DENIED; 2591 2592 return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); 2593 } 2594 2595 int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, 2596 struct npc_mcam_free_entry_req *req, 2597 struct msg_rsp *rsp) 2598 { 2599 struct npc_mcam *mcam = &rvu->hw->mcam; 2600 u16 pcifunc = req->hdr.pcifunc; 2601 int blkaddr, rc = 0; 2602 u16 cntr; 2603 2604 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2605 if (blkaddr < 0) 2606 return NPC_MCAM_INVALID_REQ; 2607 2608 /* Free request from PFFUNC with no NIXLF attached, ignore */ 2609 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2610 return NPC_MCAM_INVALID_REQ; 2611 2612 mutex_lock(&mcam->lock); 2613 2614 if (req->all) 2615 goto free_all; 2616 2617 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2618 if (rc) 2619 goto exit; 2620 2621 mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; 2622 mcam->entry2target_pffunc[req->entry] = 0x0; 2623 npc_mcam_clear_bit(mcam, req->entry); 2624 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2625 2626 /* Update entry2counter mapping */ 2627 cntr = mcam->entry2cntr_map[req->entry]; 2628 if (cntr != NPC_MCAM_INVALID_MAP) 2629 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2630 req->entry, cntr); 2631 2632 goto exit; 2633 2634 free_all: 2635 /* Free up all entries allocated to requesting PFFUNC */ 2636 npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); 2637 exit: 2638 mutex_unlock(&mcam->lock); 2639 return rc; 2640 } 2641 2642 int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu, 
2643 struct npc_mcam_read_entry_req *req, 2644 struct npc_mcam_read_entry_rsp *rsp) 2645 { 2646 struct npc_mcam *mcam = &rvu->hw->mcam; 2647 u16 pcifunc = req->hdr.pcifunc; 2648 int blkaddr, rc; 2649 2650 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2651 if (blkaddr < 0) 2652 return NPC_MCAM_INVALID_REQ; 2653 2654 mutex_lock(&mcam->lock); 2655 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2656 if (!rc) { 2657 npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry, 2658 &rsp->entry_data, 2659 &rsp->intf, &rsp->enable); 2660 } 2661 2662 mutex_unlock(&mcam->lock); 2663 return rc; 2664 } 2665 2666 int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, 2667 struct npc_mcam_write_entry_req *req, 2668 struct msg_rsp *rsp) 2669 { 2670 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 2671 struct npc_mcam *mcam = &rvu->hw->mcam; 2672 u16 pcifunc = req->hdr.pcifunc; 2673 u16 channel, chan_mask; 2674 int blkaddr, rc; 2675 u8 nix_intf; 2676 2677 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2678 if (blkaddr < 0) 2679 return NPC_MCAM_INVALID_REQ; 2680 2681 chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; 2682 channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; 2683 channel &= chan_mask; 2684 2685 mutex_lock(&mcam->lock); 2686 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2687 if (rc) 2688 goto exit; 2689 2690 if (req->set_cntr && 2691 npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { 2692 rc = NPC_MCAM_INVALID_REQ; 2693 goto exit; 2694 } 2695 2696 if (!is_npc_interface_valid(rvu, req->intf)) { 2697 rc = NPC_MCAM_INVALID_REQ; 2698 goto exit; 2699 } 2700 2701 if (is_npc_intf_tx(req->intf)) 2702 nix_intf = pfvf->nix_tx_intf; 2703 else 2704 nix_intf = pfvf->nix_rx_intf; 2705 2706 if (!is_pffunc_af(pcifunc) && 2707 npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { 2708 rc = NPC_MCAM_INVALID_REQ; 2709 goto exit; 2710 } 2711 2712 if (!is_pffunc_af(pcifunc) && 2713 npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 
pcifunc)) { 2714 rc = NPC_MCAM_INVALID_REQ; 2715 goto exit; 2716 } 2717 2718 /* For AF installed rules, the nix_intf should be set to target NIX */ 2719 if (is_pffunc_af(req->hdr.pcifunc)) 2720 nix_intf = req->intf; 2721 2722 npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, 2723 &req->entry_data, req->enable_entry); 2724 2725 if (req->set_cntr) 2726 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2727 req->entry, req->cntr); 2728 2729 rc = 0; 2730 exit: 2731 mutex_unlock(&mcam->lock); 2732 return rc; 2733 } 2734 2735 int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, 2736 struct npc_mcam_ena_dis_entry_req *req, 2737 struct msg_rsp *rsp) 2738 { 2739 struct npc_mcam *mcam = &rvu->hw->mcam; 2740 u16 pcifunc = req->hdr.pcifunc; 2741 int blkaddr, rc; 2742 2743 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2744 if (blkaddr < 0) 2745 return NPC_MCAM_INVALID_REQ; 2746 2747 mutex_lock(&mcam->lock); 2748 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2749 mutex_unlock(&mcam->lock); 2750 if (rc) 2751 return rc; 2752 2753 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); 2754 2755 return 0; 2756 } 2757 2758 int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, 2759 struct npc_mcam_ena_dis_entry_req *req, 2760 struct msg_rsp *rsp) 2761 { 2762 struct npc_mcam *mcam = &rvu->hw->mcam; 2763 u16 pcifunc = req->hdr.pcifunc; 2764 int blkaddr, rc; 2765 2766 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2767 if (blkaddr < 0) 2768 return NPC_MCAM_INVALID_REQ; 2769 2770 mutex_lock(&mcam->lock); 2771 rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); 2772 mutex_unlock(&mcam->lock); 2773 if (rc) 2774 return rc; 2775 2776 npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); 2777 2778 return 0; 2779 } 2780 2781 int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, 2782 struct npc_mcam_shift_entry_req *req, 2783 struct npc_mcam_shift_entry_rsp *rsp) 2784 { 2785 struct npc_mcam *mcam = &rvu->hw->mcam; 2786 u16 pcifunc = 
req->hdr.pcifunc; 2787 u16 old_entry, new_entry; 2788 u16 index, cntr; 2789 int blkaddr, rc; 2790 2791 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2792 if (blkaddr < 0) 2793 return NPC_MCAM_INVALID_REQ; 2794 2795 if (req->shift_count > NPC_MCAM_MAX_SHIFTS) 2796 return NPC_MCAM_INVALID_REQ; 2797 2798 mutex_lock(&mcam->lock); 2799 for (index = 0; index < req->shift_count; index++) { 2800 old_entry = req->curr_entry[index]; 2801 new_entry = req->new_entry[index]; 2802 2803 /* Check if both old and new entries are valid and 2804 * does belong to this PFFUNC or not. 2805 */ 2806 rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); 2807 if (rc) 2808 break; 2809 2810 rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); 2811 if (rc) 2812 break; 2813 2814 /* new_entry should not have a counter mapped */ 2815 if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { 2816 rc = NPC_MCAM_PERM_DENIED; 2817 break; 2818 } 2819 2820 /* Disable the new_entry */ 2821 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); 2822 2823 /* Copy rule from old entry to new entry */ 2824 npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); 2825 2826 /* Copy counter mapping, if any */ 2827 cntr = mcam->entry2cntr_map[old_entry]; 2828 if (cntr != NPC_MCAM_INVALID_MAP) { 2829 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2830 old_entry, cntr); 2831 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2832 new_entry, cntr); 2833 } 2834 2835 /* Enable new_entry and disable old_entry */ 2836 npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); 2837 npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); 2838 } 2839 2840 /* If shift has failed then report the failed index */ 2841 if (index != req->shift_count) { 2842 rc = NPC_MCAM_PERM_DENIED; 2843 rsp->failed_entry_idx = index; 2844 } 2845 2846 mutex_unlock(&mcam->lock); 2847 return rc; 2848 } 2849 2850 int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, 2851 struct npc_mcam_alloc_counter_req *req, 
2852 struct npc_mcam_alloc_counter_rsp *rsp) 2853 { 2854 struct npc_mcam *mcam = &rvu->hw->mcam; 2855 u16 pcifunc = req->hdr.pcifunc; 2856 u16 max_contig, cntr; 2857 int blkaddr, index; 2858 2859 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2860 if (blkaddr < 0) 2861 return NPC_MCAM_INVALID_REQ; 2862 2863 /* If the request is from a PFFUNC with no NIXLF attached, ignore */ 2864 if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) 2865 return NPC_MCAM_INVALID_REQ; 2866 2867 /* Since list of allocated counter IDs needs to be sent to requester, 2868 * max number of non-contiguous counters per mbox msg is limited. 2869 */ 2870 if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) 2871 return NPC_MCAM_INVALID_REQ; 2872 2873 mutex_lock(&mcam->lock); 2874 2875 /* Check if unused counters are available or not */ 2876 if (!rvu_rsrc_free_count(&mcam->counters)) { 2877 mutex_unlock(&mcam->lock); 2878 return NPC_MCAM_ALLOC_FAILED; 2879 } 2880 2881 rsp->count = 0; 2882 2883 if (req->contig) { 2884 /* Allocate requested number of contiguous counters, if 2885 * unsuccessful find max contiguous entries available. 2886 */ 2887 index = npc_mcam_find_zero_area(mcam->counters.bmap, 2888 mcam->counters.max, 0, 2889 req->count, &max_contig); 2890 rsp->count = max_contig; 2891 rsp->cntr = index; 2892 for (cntr = index; cntr < (index + max_contig); cntr++) { 2893 __set_bit(cntr, mcam->counters.bmap); 2894 mcam->cntr2pfvf_map[cntr] = pcifunc; 2895 } 2896 } else { 2897 /* Allocate requested number of non-contiguous counters, 2898 * if unsuccessful allocate as many as possible. 
2899 */ 2900 for (cntr = 0; cntr < req->count; cntr++) { 2901 index = rvu_alloc_rsrc(&mcam->counters); 2902 if (index < 0) 2903 break; 2904 rsp->cntr_list[cntr] = index; 2905 rsp->count++; 2906 mcam->cntr2pfvf_map[index] = pcifunc; 2907 } 2908 } 2909 2910 mutex_unlock(&mcam->lock); 2911 return 0; 2912 } 2913 2914 int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, 2915 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 2916 { 2917 struct npc_mcam *mcam = &rvu->hw->mcam; 2918 u16 index, entry = 0; 2919 int blkaddr, err; 2920 2921 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2922 if (blkaddr < 0) 2923 return NPC_MCAM_INVALID_REQ; 2924 2925 mutex_lock(&mcam->lock); 2926 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 2927 if (err) { 2928 mutex_unlock(&mcam->lock); 2929 return err; 2930 } 2931 2932 /* Mark counter as free/unused */ 2933 mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; 2934 rvu_free_rsrc(&mcam->counters, req->cntr); 2935 2936 /* Disable all MCAM entry's stats which are using this counter */ 2937 while (entry < mcam->bmap_entries) { 2938 if (!mcam->cntr_refcnt[req->cntr]) 2939 break; 2940 2941 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 2942 if (index >= mcam->bmap_entries) 2943 break; 2944 entry = index + 1; 2945 if (mcam->entry2cntr_map[index] != req->cntr) 2946 continue; 2947 2948 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2949 index, req->cntr); 2950 } 2951 2952 mutex_unlock(&mcam->lock); 2953 return 0; 2954 } 2955 2956 int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, 2957 struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) 2958 { 2959 struct npc_mcam *mcam = &rvu->hw->mcam; 2960 u16 index, entry = 0; 2961 int blkaddr, rc; 2962 2963 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2964 if (blkaddr < 0) 2965 return NPC_MCAM_INVALID_REQ; 2966 2967 mutex_lock(&mcam->lock); 2968 rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 2969 if (rc) 2970 goto 
exit; 2971 2972 /* Unmap the MCAM entry and counter */ 2973 if (!req->all) { 2974 rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); 2975 if (rc) 2976 goto exit; 2977 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2978 req->entry, req->cntr); 2979 goto exit; 2980 } 2981 2982 /* Disable all MCAM entry's stats which are using this counter */ 2983 while (entry < mcam->bmap_entries) { 2984 if (!mcam->cntr_refcnt[req->cntr]) 2985 break; 2986 2987 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 2988 if (index >= mcam->bmap_entries) 2989 break; 2990 if (mcam->entry2cntr_map[index] != req->cntr) 2991 continue; 2992 2993 entry = index + 1; 2994 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2995 index, req->cntr); 2996 } 2997 exit: 2998 mutex_unlock(&mcam->lock); 2999 return rc; 3000 } 3001 3002 int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, 3003 struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) 3004 { 3005 struct npc_mcam *mcam = &rvu->hw->mcam; 3006 int blkaddr, err; 3007 3008 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3009 if (blkaddr < 0) 3010 return NPC_MCAM_INVALID_REQ; 3011 3012 mutex_lock(&mcam->lock); 3013 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3014 mutex_unlock(&mcam->lock); 3015 if (err) 3016 return err; 3017 3018 rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); 3019 3020 return 0; 3021 } 3022 3023 int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, 3024 struct npc_mcam_oper_counter_req *req, 3025 struct npc_mcam_oper_counter_rsp *rsp) 3026 { 3027 struct npc_mcam *mcam = &rvu->hw->mcam; 3028 int blkaddr, err; 3029 3030 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3031 if (blkaddr < 0) 3032 return NPC_MCAM_INVALID_REQ; 3033 3034 mutex_lock(&mcam->lock); 3035 err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); 3036 mutex_unlock(&mcam->lock); 3037 if (err) 3038 return err; 3039 3040 rsp->stat = rvu_read64(rvu, blkaddr, 
NPC_AF_MATCH_STATX(req->cntr)); 3041 rsp->stat &= BIT_ULL(48) - 1; 3042 3043 return 0; 3044 } 3045 3046 int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, 3047 struct npc_mcam_alloc_and_write_entry_req *req, 3048 struct npc_mcam_alloc_and_write_entry_rsp *rsp) 3049 { 3050 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 3051 struct npc_mcam_alloc_counter_req cntr_req; 3052 struct npc_mcam_alloc_counter_rsp cntr_rsp; 3053 struct npc_mcam_alloc_entry_req entry_req; 3054 struct npc_mcam_alloc_entry_rsp entry_rsp; 3055 struct npc_mcam *mcam = &rvu->hw->mcam; 3056 u16 entry = NPC_MCAM_ENTRY_INVALID; 3057 u16 cntr = NPC_MCAM_ENTRY_INVALID; 3058 u16 channel, chan_mask; 3059 int blkaddr, rc; 3060 u8 nix_intf; 3061 3062 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3063 if (blkaddr < 0) 3064 return NPC_MCAM_INVALID_REQ; 3065 3066 if (!is_npc_interface_valid(rvu, req->intf)) 3067 return NPC_MCAM_INVALID_REQ; 3068 3069 chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; 3070 channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; 3071 channel &= chan_mask; 3072 3073 if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) 3074 return NPC_MCAM_INVALID_REQ; 3075 3076 if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, 3077 req->hdr.pcifunc)) 3078 return NPC_MCAM_INVALID_REQ; 3079 3080 /* Try to allocate a MCAM entry */ 3081 entry_req.hdr.pcifunc = req->hdr.pcifunc; 3082 entry_req.contig = true; 3083 entry_req.priority = req->priority; 3084 entry_req.ref_entry = req->ref_entry; 3085 entry_req.count = 1; 3086 3087 rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, 3088 &entry_req, &entry_rsp); 3089 if (rc) 3090 return rc; 3091 3092 if (!entry_rsp.count) 3093 return NPC_MCAM_ALLOC_FAILED; 3094 3095 entry = entry_rsp.entry; 3096 3097 if (!req->alloc_cntr) 3098 goto write_entry; 3099 3100 /* Now allocate counter */ 3101 cntr_req.hdr.pcifunc = req->hdr.pcifunc; 3102 cntr_req.contig = true; 3103 cntr_req.count = 1; 3104 3105 
rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 3106 if (rc) { 3107 /* Free allocated MCAM entry */ 3108 mutex_lock(&mcam->lock); 3109 mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; 3110 npc_mcam_clear_bit(mcam, entry); 3111 mutex_unlock(&mcam->lock); 3112 return rc; 3113 } 3114 3115 cntr = cntr_rsp.cntr; 3116 3117 write_entry: 3118 mutex_lock(&mcam->lock); 3119 3120 if (is_npc_intf_tx(req->intf)) 3121 nix_intf = pfvf->nix_tx_intf; 3122 else 3123 nix_intf = pfvf->nix_rx_intf; 3124 3125 npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf, 3126 &req->entry_data, req->enable_entry); 3127 3128 if (req->alloc_cntr) 3129 npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); 3130 mutex_unlock(&mcam->lock); 3131 3132 rsp->entry = entry; 3133 rsp->cntr = cntr; 3134 3135 return 0; 3136 } 3137 3138 #define GET_KEX_CFG(intf) \ 3139 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) 3140 3141 #define GET_KEX_FLAGS(ld) \ 3142 rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) 3143 3144 #define GET_KEX_LD(intf, lid, lt, ld) \ 3145 rvu_read64(rvu, BLKADDR_NPC, \ 3146 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) 3147 3148 #define GET_KEX_LDFLAGS(intf, ld, fl) \ 3149 rvu_read64(rvu, BLKADDR_NPC, \ 3150 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) 3151 3152 int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, 3153 struct npc_get_kex_cfg_rsp *rsp) 3154 { 3155 int lid, lt, ld, fl; 3156 3157 rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); 3158 rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); 3159 for (lid = 0; lid < NPC_MAX_LID; lid++) { 3160 for (lt = 0; lt < NPC_MAX_LT; lt++) { 3161 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3162 rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = 3163 GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); 3164 rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = 3165 GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); 3166 } 3167 } 3168 } 3169 for (ld = 0; ld < NPC_MAX_LD; ld++) 3170 rsp->kex_ld_flags[ld] = 
GET_KEX_FLAGS(ld); 3171 3172 for (ld = 0; ld < NPC_MAX_LD; ld++) { 3173 for (fl = 0; fl < NPC_MAX_LFL; fl++) { 3174 rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = 3175 GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); 3176 rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = 3177 GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); 3178 } 3179 } 3180 memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); 3181 return 0; 3182 } 3183 3184 int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, 3185 struct msg_req *req, 3186 struct npc_mcam_read_base_rule_rsp *rsp) 3187 { 3188 struct npc_mcam *mcam = &rvu->hw->mcam; 3189 int index, blkaddr, nixlf, rc = 0; 3190 u16 pcifunc = req->hdr.pcifunc; 3191 struct rvu_pfvf *pfvf; 3192 u8 intf, enable; 3193 3194 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3195 if (blkaddr < 0) 3196 return NPC_MCAM_INVALID_REQ; 3197 3198 /* Return the channel number in case of PF */ 3199 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 3200 pfvf = rvu_get_pfvf(rvu, pcifunc); 3201 rsp->entry.kw[0] = pfvf->rx_chan_base; 3202 rsp->entry.kw_mask[0] = 0xFFFULL; 3203 goto out; 3204 } 3205 3206 /* Find the pkt steering rule installed by PF to this VF */ 3207 mutex_lock(&mcam->lock); 3208 for (index = 0; index < mcam->bmap_entries; index++) { 3209 if (mcam->entry2target_pffunc[index] == pcifunc) 3210 goto read_entry; 3211 } 3212 3213 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3214 if (rc < 0) { 3215 mutex_unlock(&mcam->lock); 3216 goto out; 3217 } 3218 /* Read the default ucast entry if there is no pkt steering rule */ 3219 index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, 3220 NIXLF_UCAST_ENTRY); 3221 read_entry: 3222 /* Read the mcam entry */ 3223 npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, 3224 &enable); 3225 mutex_unlock(&mcam->lock); 3226 out: 3227 return rc; 3228 } 3229 3230 int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, 3231 struct npc_mcam_get_stats_req *req, 3232 struct npc_mcam_get_stats_rsp *rsp) 3233 { 3234 struct npc_mcam *mcam = 
&rvu->hw->mcam; 3235 u16 index, cntr; 3236 int blkaddr; 3237 u64 regval; 3238 u32 bank; 3239 3240 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3241 if (blkaddr < 0) 3242 return NPC_MCAM_INVALID_REQ; 3243 3244 mutex_lock(&mcam->lock); 3245 3246 index = req->entry & (mcam->banksize - 1); 3247 bank = npc_get_bank(mcam, req->entry); 3248 3249 /* read MCAM entry STAT_ACT register */ 3250 regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); 3251 3252 if (!(regval & BIT_ULL(9))) { 3253 rsp->stat_ena = 0; 3254 mutex_unlock(&mcam->lock); 3255 return 0; 3256 } 3257 3258 cntr = regval & 0x1FF; 3259 3260 rsp->stat_ena = 1; 3261 rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); 3262 rsp->stat &= BIT_ULL(48) - 1; 3263 3264 mutex_unlock(&mcam->lock); 3265 3266 return 0; 3267 } 3268