// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"

#define RSVD_MCAM_ENTRIES_PER_PF	2 /* Bcast & Promisc */
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */

#define NIXLF_UCAST_ENTRY	0
#define NIXLF_BCAST_ENTRY	1
#define NIXLF_PROMISC_ENTRY	2

#define NPC_PARSE_RESULT_DMAC_OFFSET	8
#define NPC_HW_TSTAMP_OFFSET		8

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc);

void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
	int blkaddr;
	u64 val = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Config CPI base for the PKIND */
	val = pkind | 1ULL << 62;
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
}

int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	u32 map;
	int i;

	for (i = 0; i < pkind->rsrc.max; i++) {
		map = pkind->pfchan_map[i];
		if (((map >> 16) & 0x3F) == pf)
			return i;
	}
	return -1;
}

#define NPC_AF_ACTION0_PTR_ADVANCE	GENMASK_ULL(27, 20)

int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
{
	int pkind, blkaddr;
	u64 val;

	pkind = rvu_npc_get_pkind(rvu, pf);
	if (pkind < 0) {
		dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
		return -EINVAL;
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
	val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
	/* If timestamp is enabled then configure NPC to shift 8 bytes */
	if (enable)
		val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
				  NPC_HW_TSTAMP_OFFSET);
	rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);

	return 0;
}

static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
				    u16 pcifunc, int nixlf, int type)
{
	int pf = rvu_get_pf(pcifunc);
	int index;

	/* Check if this is for a PF */
	if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Reserved entries exclude PF0 */
		pf--;
		index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
		/* Broadcast address matching entry should be first so
		 * that the packet can be replicated to all VFs.
		 */
		if (type == NIXLF_BCAST_ENTRY)
			return index;
		else if (type == NIXLF_PROMISC_ENTRY)
			return index + 1;
	}

	return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
}

static int npc_get_bank(struct npc_mcam *mcam, int index)
{
	int bank = index / mcam->banksize;

	/* 0,1 & 2,3 banks are combined for this keysize */
	if (mcam->keysize == NPC_MCAM_KEY_X2)
		return bank ? 2 : 0;

	return bank;
}

static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	u64 cfg;

	index &= (mcam->banksize - 1);
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
	return (cfg & 1);
}

static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CFG(index, bank),
			    enable ? 1 : 0);
	}
}

static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				 int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);
	int actbank = bank;

	index &= (mcam->banksize - 1);
	for (; bank < (actbank + mcam->banks_per_entry); bank++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
	}
}

static void npc_get_keyword(struct mcam_entry *entry, int idx,
			    u64 *cam0, u64 *cam1)
{
	u64 kw_mask = 0x00;

#define CAM_MASK(n)	(BIT_ULL(n) - 1)

	/* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
	 * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
	 *
	 * Also, only 48 bits of BANKX_CAMX_W1 are valid.
	 */
	switch (idx) {
	case 0:
		/* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
		*cam1 = entry->kw[0];
		kw_mask = entry->kw_mask[0];
		break;
	case 1:
		/* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
		*cam1 = entry->kw[1] & CAM_MASK(48);
		kw_mask = entry->kw_mask[1] & CAM_MASK(48);
		break;
	case 2:
		/* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
		 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
		 */
		*cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
		kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
		break;
	case 3:
		/* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
		 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
		 */
		*cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
		*cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
		kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
		kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
		break;
	case 4:
		/* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
		 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
		 */
		*cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
		kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
		break;
	case 5:
		/* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
		 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
		 */
		*cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
		*cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
		kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
		kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
		break;
	case 6:
		/* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
		 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
		 */
		*cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
		*cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
		kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
		kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
		break;
	case 7:
		/* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
		*cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
		kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
		break;
	}

	*cam1 &= kw_mask;
	*cam0 = ~*cam1 & kw_mask;
}

static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				  int blkaddr, int index, u8 intf,
				  struct mcam_entry *entry, bool enable)
{
	int bank = npc_get_bank(mcam, index);
	int kw = 0, actbank, actindex;
	u64 cam0, cam1;

	actbank = bank; /* Save bank id, to set action later on */
	actindex = index;
	index &= (mcam->banksize - 1);

	/* Disable before mcam entry update */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);

	/* Clear mcam entry to avoid writes being suppressed by NPC */
	npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);

	/* CAM1 takes the comparison value and
	 * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
	 * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
	 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
	 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare.
	 */
	for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
		/* Interface should be set in all banks */
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
			    intf);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
			    ~intf & 0x3);

		/* Set the match key */
		npc_get_keyword(entry, kw, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);

		npc_get_keyword(entry, kw + 1, &cam0, &cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
	}

	/* Set 'action' */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);

	/* Set TAG 'action' */
	rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
		    entry->vtag_action);

	/* Enable the entry */
	if (enable)
		npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}

static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
				int blkaddr, u16 src, u16 dest)
{
	int dbank = npc_get_bank(mcam, dest);
	int sbank = npc_get_bank(mcam, src);
	u64 cfg, sreg, dreg;
	int bank, i;

	src &= (mcam->banksize - 1);
	dest &= (mcam->banksize - 1);

	/* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
	for (bank = 0; bank < mcam->banks_per_entry; bank++) {
		sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
		dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
		for (i = 0; i < 6; i++) {
			cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
			rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
		}
	}

	/* Copy action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);

	/* Copy TAG action */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);

	/* Enable or disable */
	cfg = rvu_read64(rvu, blkaddr,
			 NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}

static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			       int blkaddr, int index)
{
	int bank = npc_get_bank(mcam, index);

	index &= (mcam->banksize - 1);
	return rvu_read64(rvu, blkaddr,
			  NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}

void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry entry = { {0} };
	struct nix_rx_action action;
	int blkaddr, index, kwi;
	u64 mac = 0;

	/* AF's VFs work in promiscuous mode */
	if (is_afvf(pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);

	/* Match ingress channel and DMAC */
	entry.kw[0] = chan;
	entry.kw_mask[0] = 0xFFFULL;

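	/* Note: with the default MKEX profile the DMAC is extracted to byte
	 * offset 8 of the parse result, i.e. KW1<47:0>, which is why a
	 * 48-bit mask is applied to kw[kwi] below.
	 */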
	kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
	entry.kw[kwi] = mac;
	entry.kw_mask[kwi] = BIT_ULL(48) - 1;

	/* Don't change the action if entry is already enabled
	 * Otherwise RSS action may get overwritten.
	 */
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, index);
	} else {
		*(u64 *)&action = 0x00;
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	entry.action = *(u64 *)&action;
	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
			      NIX_INTF_RX, &entry, true);

	/* add VLAN matching, setup action and save entry back for later */
	entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
	entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;

	entry.vtag_action = VTAG0_VALID_BIT |
			    FIELD_PREP(VTAG0_TYPE_MASK, 0) |
			    FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
			    FIELD_PREP(VTAG0_RELPTR_MASK, 12);

	memcpy(&pfvf->entry, &entry, sizeof(entry));
}

void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, bool allmulti)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, ucast_idx, index, kwi;
	struct mcam_entry entry = { {0} };
	struct nix_rx_action action = { };

	/* Only PF or AF VF can add a promiscuous entry */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
		return;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	entry.kw[0] = chan;
	entry.kw_mask[0] = 0xFFFULL;

	if (allmulti) {
		kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
		entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
		entry.kw_mask[kwi] = BIT_ULL(40);
	}

	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
					     nixlf, NIXLF_UCAST_ENTRY);

	/* If the corresponding PF's ucast action is RSS,
	 * use the same action for promisc also
	 */
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
						      blkaddr, ucast_idx);

	if (action.op != NIX_RX_ACTIONOP_RSS) {
		*(u64 *)&action = 0x00;
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	}

	entry.action = *(u64 *)&action;
	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
			      NIX_INTF_RX, &entry, true);
}

static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
				     int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Only PF's have a promiscuous entry */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
}

void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
}

void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry entry = { {0} };
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_rx_action action;
	struct rvu_pfvf *pfvf;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Skip LBK VFs */
	if (is_afvf(pcifunc))
		return;

	/* If pkt replication is not supported,
	 * then only PF is allowed to add a bcast match entry.
	 */
	if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_BCAST_ENTRY);

	/* Match ingress channel */
	entry.kw[0] = chan;
	entry.kw_mask[0] = 0xfffull;

	/* Match broadcast MAC address.
	 * DMAC is extracted at 0th bit of PARSE_KEX::KW1
	 */
	entry.kw[1] = 0xffffffffffffull;
	entry.kw_mask[1] = 0xffffffffffffull;

	*(u64 *)&action = 0x00;
	if (!hw->cap.nix_rx_multicast) {
		/* Early silicon doesn't support pkt replication,
		 * so install entry with UCAST action, so that PF
		 * receives all broadcast packets.
		 */
		action.op = NIX_RX_ACTIONOP_UCAST;
		action.pf_func = pcifunc;
	} else {
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		action.index = pfvf->bcast_mce_idx;
		action.op = NIX_RX_ACTIONOP_MCAST;
	}

	entry.action = *(u64 *)&action;
	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
			      NIX_INTF_RX, &entry, true);
}

void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
}

void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_rx_action action;
	int blkaddr, index, bank;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Check if this is for reserved default entry */
	if (mcam_index < 0) {
		if (group != DEFAULT_RSS_CONTEXT_GROUP)
			return;
		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
						 nixlf, NIXLF_UCAST_ENTRY);
	} else {
		/* TODO: validate this mcam index */
		index = mcam_index;
	}

	if (index >= mcam->total_entries)
		return;

	bank = npc_get_bank(mcam, index);
	index &= (mcam->banksize - 1);

	*(u64 *)&action = rvu_read64(rvu, blkaddr,
				     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
	/* Ignore if no action was set earlier */
	if (!*(u64 *)&action)
		return;

	action.op = NIX_RX_ACTIONOP_RSS;
	action.pf_func = pcifunc;
	action.index = group;
	action.flow_key_alg = alg_idx;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);

	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_PROMISC_ENTRY);

	/* If PF's promiscuous entry is enabled,
	 * Set RSS action for that entry as well
	 */
	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
		bank = npc_get_bank(mcam, index);
		index &= (mcam->banksize - 1);

		rvu_write64(rvu, blkaddr,
			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
			    *(u64 *)&action);
	}

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}

static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
				       int nixlf, bool enable)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct nix_rx_action action;
	int index, bank, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	/* Ucast MCAM match entry of this PF/VF */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);

	/* For PF, ena/dis promisc and bcast MCAM match entries */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return;

	/* For bcast, enable/disable only if its action is not
	 * packet replication; if the action is replication then
	 * this PF's nixlf is removed from the bcast replication
	 * list instead.
	 */
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_BCAST_ENTRY);
	bank = npc_get_bank(mcam, index);
	*(u64 *)&action = rvu_read64(rvu, blkaddr,
	     NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
	if (action.op != NIX_RX_ACTIONOP_MCAST)
		npc_enable_mcam_entry(rvu, mcam,
				      blkaddr, index, enable);
	if (enable)
		rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
	else
		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}

void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
}

void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}

void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);

	/* Disable and free all MCAM entries mapped to this 'pcifunc' */
	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);

	/* Free all MCAM counters mapped to this 'pcifunc' */
	npc_mcam_free_all_counters(rvu, mcam, pcifunc);

	mutex_unlock(&mcam->lock);

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

#define SET_KEX_LD(intf, lid, ltype, ld, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)

#define SET_KEX_LDFLAGS(intf, ld, flags, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)

#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)		\
	(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) |		\
	 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int lid, ltype;
	int lid_count;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
	lid_count = (cfg >> 4) & 0xF;

	/* First clear any existing config i.e
	 * disable LDATA and FLAGS extraction.
	 */
	for (lid = 0; lid < lid_count; lid++) {
		for (ltype = 0; ltype < 16; ltype++) {
			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
			SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
			SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);

			SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
			SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
			SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
			SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
		}
	}

	if (mcam->keysize != NPC_MCAM_KEY_X2)
		return;

	/* Default MCAM KEX profile */
	/* Layer A: Ethernet: */

	/* DMAC: 6 bytes, KW1[47:0] */
	cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);

	/* Ethertype: 2 bytes, KW0[47:32] */
	cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);

	/* Layer B: Single VLAN (CTAG) */
	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
	cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);

	/* Layer B: Stacked VLAN (STAG|QinQ) */
	/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
	cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);

	/* Layer C: IPv4 */
	/* SIP+DIP: 8 bytes, KW2[63:0] */
	cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
	/* TOS: 1 byte, KW1[63:56] */
	cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);

	/* Layer D: UDP */
	/* SPORT: 2 bytes, KW3[15:0] */
	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
	/* DPORT: 2 bytes, KW3[31:16] */
	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);

	/* Layer D: TCP */
	/* SPORT: 2 bytes, KW3[15:0] */
	cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
	/* DPORT: 2 bytes, KW3[31:16] */
	cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
}

static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
				     struct npc_mcam_kex *mkex)
{
	int lid, lt, ld, fl;

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
		    mkex->keyx_cfg[NIX_INTF_RX]);
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
		    mkex->keyx_cfg[NIX_INTF_TX]);

	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
			    mkex->kex_ld_flags[ld]);

	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
					   [lid][lt][ld]);

				SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
					   [lid][lt][ld]);
			}
		}
	}

	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
			SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_RX]
					[ld][fl]);

			SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
					mkex->intf_ld_flags[NIX_INTF_TX]
					[ld][fl]);
		}
	}
}

/* strtoull of "mkexprof" with base:36 */
#define MKEX_SIGN	0x19bbfdbd15f
#define MKEX_END_SIGN	0xdeadbeef

static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
{
	const char *mkex_profile = rvu->mkex_pfl_name;
	struct device *dev = &rvu->pdev->dev;
	void __iomem *mkex_prfl_addr = NULL;
	struct npc_mcam_kex *mcam_kex;
	u64 prfl_addr;
	u64 prfl_sz;

	/* If user not selected mkex profile */
	if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
		goto load_default;

	if (!rvu->fwdata)
		goto load_default;
	prfl_addr = rvu->fwdata->mcam_addr;
	prfl_sz = rvu->fwdata->mcam_sz;

	if (!prfl_addr || !prfl_sz)
		goto load_default;

	mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
	if (!mkex_prfl_addr)
		goto load_default;

	mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;

	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
		/* Compare with mkex mod_param name string */
		if (mcam_kex->mkex_sign == MKEX_SIGN &&
		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
			/* Due to an errata (35786) in A0/B0 pass silicon,
			 * parse nibble enable configuration has to be
			 * identical for both Rx and Tx interfaces.
			 */
			if (is_rvu_96xx_B0(rvu) &&
			    mcam_kex->keyx_cfg[NIX_INTF_RX] !=
			    mcam_kex->keyx_cfg[NIX_INTF_TX])
				goto load_default;

			/* Program selected mkex profile */
			npc_program_mkex_profile(rvu, blkaddr, mcam_kex);

			goto unmap;
		}

		mcam_kex++;
		prfl_sz -= sizeof(struct npc_mcam_kex);
	}
	dev_warn(dev, "Failed to load requested profile: %s\n",
		 rvu->mkex_pfl_name);

load_default:
	dev_info(rvu->dev, "Using default mkex profile\n");
	/* Config packet data and flags extraction into PARSE result */
	npc_config_ldata_extract(rvu, blkaddr);

unmap:
	if (mkex_prfl_addr)
		iounmap(mkex_prfl_addr);
}

static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
				 struct npc_kpu_profile_action *kpuaction,
				 int kpu, int entry, bool pkind)
{
	struct npc_kpu_action0 action0 = {0};
	struct npc_kpu_action1 action1 = {0};
	u64 reg;

	action1.errlev = kpuaction->errlev;
	action1.errcode = kpuaction->errcode;
	action1.dp0_offset = kpuaction->dp0_offset;
	action1.dp1_offset = kpuaction->dp1_offset;
	action1.dp2_offset = kpuaction->dp2_offset;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION1(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

	action0.byp_count = kpuaction->bypass_count;
	action0.capture_ena = kpuaction->cap_ena;
	action0.parse_done = kpuaction->parse_done;
	action0.next_state = kpuaction->next_state;
	action0.capture_lid = kpuaction->lid;
	action0.capture_ltype = kpuaction->ltype;
	action0.capture_flags = kpuaction->flags;
	action0.ptr_advance = kpuaction->ptr_advance;
	action0.var_len_offset = kpuaction->offset;
	action0.var_len_mask = kpuaction->mask;
	action0.var_len_right = kpuaction->right;
	action0.var_len_shift = kpuaction->shift;

	if (pkind)
		reg = NPC_AF_PKINDX_ACTION0(entry);
	else
		reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);

	rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}

static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
			      struct npc_kpu_profile_cam *kpucam,
			      int kpu, int entry)
{
	struct npc_kpu_cam cam0 = {0};
	struct npc_kpu_cam cam1 = {0};

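	/* Same CAM0/CAM1 encoding as the MCAM entries above: CAM1 carries
	 * the key bits to be matched as '1', CAM0 the bits to be matched
	 * as '0', and a bit clear in both (i.e. masked off) is a don't-care.
	 */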
	cam1.state = kpucam->state & kpucam->state_mask;
	cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
	cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
	cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

	cam0.state = ~kpucam->state & kpucam->state_mask;
	cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
	cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
	cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
}

static inline u64 enable_mask(int count)
{
	return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
}

static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
				    struct npc_kpu_profile *profile)
{
	int entry, num_entries, max_entries;

	if (profile->cam_entries != profile->action_entries) {
		dev_err(rvu->dev,
			"KPU%d: CAM and action entries [%d != %d] not equal\n",
			kpu, profile->cam_entries, profile->action_entries);
	}

	max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;

	/* Program CAM match entries for previous KPU extracted data */
	num_entries = min_t(int, profile->cam_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpucam(rvu, blkaddr,
				  &profile->cam[entry], kpu, entry);

	/* Program this KPU's actions */
	num_entries = min_t(int, profile->action_entries, max_entries);
	for (entry = 0; entry < num_entries; entry++)
		npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
				     kpu, entry, false);

	/* Enable all programmed entries */
	num_entries = min_t(int, profile->action_entries, profile->cam_entries);
	rvu_write64(rvu, blkaddr,
		    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
	if (num_entries > 64) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
			    enable_mask(num_entries - 64));
	}

	/* Enable this KPU */
	rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}

static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int num_pkinds, num_kpus, idx;
	struct npc_pkind *pkind;

	/* Get HW limits */
	hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;

	/* Disable all KPUs and their entries */
	for (idx = 0; idx < hw->npc_kpus; idx++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
	}

	/* First program IKPU profile i.e PKIND configs.
	 * Check HW max count to avoid configuring junk or
	 * writing to unsupported CSR addresses.
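	 * (The HW pkind count itself is read from NPC_AF_CONST1 in
	 *  rvu_npc_init() and cached in pkind->rsrc.max.)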
	 */
	pkind = &hw->pkind;
	num_pkinds = ARRAY_SIZE(ikpu_action_entries);
	num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);

	for (idx = 0; idx < num_pkinds; idx++)
		npc_config_kpuaction(rvu, blkaddr,
				     &ikpu_action_entries[idx], 0, idx, true);

	/* Program KPU CAM and Action profiles */
	num_kpus = ARRAY_SIZE(npc_kpu_profiles);
	num_kpus = min_t(int, hw->npc_kpus, num_kpus);

	for (idx = 0; idx < num_kpus; idx++)
		npc_program_kpu_profile(rvu, blkaddr,
					idx, &npc_kpu_profiles[idx]);
}

static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
	int nixlf_count = rvu_get_nixlf_count(rvu);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int rsvd, err;
	u64 cfg;

	/* Get HW limits */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
	mcam->banks = (cfg >> 44) & 0xF;
	mcam->banksize = (cfg >> 28) & 0xFFFF;
	mcam->counters.max = (cfg >> 48) & 0xFFFF;

	/* Actual number of MCAM entries vary by entry size */
	cfg = (rvu_read64(rvu, blkaddr,
			  NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
	mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
	mcam->keysize = cfg;

	/* Number of banks combined per MCAM entry */
	if (cfg == NPC_MCAM_KEY_X4)
		mcam->banks_per_entry = 4;
	else if (cfg == NPC_MCAM_KEY_X2)
		mcam->banks_per_entry = 2;
	else
		mcam->banks_per_entry = 1;

	/* Reserve one MCAM entry for each of the NIX LF to
	 * guarantee space to install default matching DMAC rule.
	 * Also reserve 2 MCAM entries for each PF for default
	 * channel based matching or 'bcast & promisc' matching to
	 * support BCAST and PROMISC modes of operation for PFs.
	 * PF0 is excluded.
	 */
	rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
		((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
	if (mcam->total_entries <= rsvd) {
		dev_warn(rvu->dev,
			 "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
			 mcam->total_entries);
		return -ENOMEM;
	}

	mcam->bmap_entries = mcam->total_entries - rsvd;
	mcam->nixlf_offset = mcam->bmap_entries;
	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;

	/* Allocate bitmaps for managing MCAM entries */
	mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
				  sizeof(long), GFP_KERNEL);
	if (!mcam->bmap)
		return -ENOMEM;

	mcam->bmap_reverse = devm_kcalloc(rvu->dev,
					  BITS_TO_LONGS(mcam->bmap_entries),
					  sizeof(long), GFP_KERNEL);
	if (!mcam->bmap_reverse)
		return -ENOMEM;

	mcam->bmap_fcnt = mcam->bmap_entries;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
					    sizeof(u16), GFP_KERNEL);
	if (!mcam->entry2pfvf_map)
		return -ENOMEM;

	/* Reserve 1/8th of MCAM entries at the bottom for low priority
	 * allocations and another 1/8th at the top for high priority
	 * allocations.
	 */
	mcam->lprio_count = mcam->bmap_entries / 8;
	if (mcam->lprio_count > BITS_PER_LONG)
		mcam->lprio_count = round_down(mcam->lprio_count,
					       BITS_PER_LONG);
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
	mcam->hprio_count = mcam->lprio_count;
	mcam->hprio_end = mcam->hprio_count;

	/* Reserve last counter for MCAM RX miss action which is set to
	 * drop pkt. This way we will know how many pkts didn't match
	 * any MCAM entry.
	 */
	mcam->counters.max--;
	mcam->rx_miss_act_cntr = mcam->counters.max;

	/* Allocate bitmap for managing MCAM counters and memory
	 * for saving counter to RVU PFFUNC allocation mapping.
	 */
	err = rvu_alloc_bitmap(&mcam->counters);
	if (err)
		return err;

	mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
					   sizeof(u16), GFP_KERNEL);
	if (!mcam->cntr2pfvf_map)
		goto free_mem;

	/* Alloc memory for MCAM entry to counter mapping and for tracking
	 * counter's reference count.
	 */
	mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
					    sizeof(u16), GFP_KERNEL);
	if (!mcam->entry2cntr_map)
		goto free_mem;

	mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
					 sizeof(u16), GFP_KERNEL);
	if (!mcam->cntr_refcnt)
		goto free_mem;

	mutex_init(&mcam->lock);

	return 0;

free_mem:
	kfree(mcam->counters.bmap);
	return -ENOMEM;
}

int rvu_npc_init(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 keyz = NPC_MCAM_KEY_X2;
	int blkaddr, entry, bank, err;
	u64 cfg, nibble_ena;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	/* First disable all MCAM entries, to stop traffic towards NIXLFs */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
	for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
		for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
			rvu_write64(rvu, blkaddr,
				    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
	}

	/* Allocate resource bitmap for pkind */
	pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
				      NPC_AF_CONST1) >> 12) & 0xFF;
	err = rvu_alloc_bitmap(&pkind->rsrc);
	if (err)
		return err;

	/* Allocate mem for pkind to PF and channel mapping info */
	pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
					 sizeof(u32), GFP_KERNEL);
	if (!pkind->pfchan_map)
		return -ENOMEM;

	/* Configure KPU profile */
	npc_parser_profile_init(rvu, blkaddr);

	/* Config Outer L2, IPv4's NPC layer info */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
		    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
		    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);

	/* Config Inner IPV4 NPC layer info */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
		    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);

	/* Enable below for Rx pkts.
	 * - Outer IPv4 header checksum validation.
	 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
	 * - Inner IPv4 header checksum validation.
	 * - Set non zero checksum error code value
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
		    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
		    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
		    BIT_ULL(2) | BIT_ULL(1));

	/* Set RX and TX side MCAM search key size.
	 * LA..LD (ltype only) + Channel
	 */
	nibble_ena = 0x49247;
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
		    ((keyz & 0x3) << 32) | nibble_ena);
	/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
	 * configuration has to be identical for both Rx and Tx interfaces.
	 */
	if (!is_rvu_96xx_B0(rvu))
		nibble_ena = (1ULL << 19) - 1;
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
		    ((keyz & 0x3) << 32) | nibble_ena);

	err = npc_mcam_rsrcs_init(rvu, blkaddr);
	if (err)
		return err;

	/* Configure MKEX profile */
	npc_load_mkex_profile(rvu, blkaddr);

	/* Set TX miss action to UCAST_DEFAULT i.e
	 * transmit the packet on NIX LF SQ's default channel.
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
		    NIX_TX_ACTIONOP_UCAST_DEFAULT);

	/* If MCAM lookup doesn't result in a match, drop the received packet.
	 * And map this action to a counter to count dropped pkts.
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
		    NIX_RX_ACTIONOP_DROP);
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
		    BIT_ULL(9) | mcam->rx_miss_act_cntr);

	return 0;
}

void rvu_npc_freemem(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	struct npc_mcam *mcam = &rvu->hw->mcam;

	kfree(pkind->rsrc.bmap);
	kfree(mcam->counters.bmap);
	mutex_destroy(&mcam->lock);
}

void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
				       int blkaddr, int *alloc_cnt,
				       int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int entry;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (entry = 0; entry < mcam->bmap_entries; entry++) {
		if (mcam->entry2pfvf_map[entry] == pcifunc) {
			(*alloc_cnt)++;
			if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
				(*enable_cnt)++;
		}
	}
}

void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
					 int blkaddr, int *alloc_cnt,
					 int *enable_cnt)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int cntr;

	*alloc_cnt = 0;
	*enable_cnt = 0;

	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
			(*alloc_cnt)++;
			if (mcam->cntr_refcnt[cntr])
				(*enable_cnt)++;
		}
	}
}

static int npc_mcam_verify_entry(struct npc_mcam *mcam,
				 u16 pcifunc, int entry)
{
	/* Verify if entry is valid and if it is indeed
	 * allocated to the requesting PFFUNC.
	 */
	if (entry >= mcam->bmap_entries)
		return NPC_MCAM_INVALID_REQ;

	if (pcifunc != mcam->entry2pfvf_map[entry])
		return NPC_MCAM_PERM_DENIED;

	return 0;
}

static int npc_mcam_verify_counter(struct npc_mcam *mcam,
				   u16 pcifunc, int cntr)
{
	/* Verify if counter is valid and if it is indeed
	 * allocated to the requesting PFFUNC.
	 */
	if (cntr >= mcam->counters.max)
		return NPC_MCAM_INVALID_REQ;

	if (pcifunc != mcam->cntr2pfvf_map[cntr])
		return NPC_MCAM_PERM_DENIED;

	return 0;
}

static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
					int blkaddr, u16 entry, u16 cntr)
{
	u16 index = entry & (mcam->banksize - 1);
	u16 bank = npc_get_bank(mcam, entry);

	/* Set mapping and increment counter's refcnt */
	mcam->entry2cntr_map[entry] = cntr;
	mcam->cntr_refcnt[cntr]++;
	/* Enable stats */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
		    BIT_ULL(9) | cntr);
}

static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
					  struct npc_mcam *mcam,
					  int blkaddr, u16 entry, u16 cntr)
{
	u16 index = entry & (mcam->banksize - 1);
	u16 bank = npc_get_bank(mcam, entry);

	/* Remove mapping and reduce counter's refcnt */
	mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
	mcam->cntr_refcnt[cntr]--;
	/* Disable stats */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
}

/* Sets MCAM entry in bitmap as used. Update
 * reverse bitmap too. Should be called with
 * 'mcam->lock' held.
 */
static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
{
	u16 entry, rentry;

	entry = index;
	rentry = mcam->bmap_entries - index - 1;

	__set_bit(entry, mcam->bmap);
	__set_bit(rentry, mcam->bmap_reverse);
	mcam->bmap_fcnt--;
}

/* Sets MCAM entry in bitmap as free. Update
 * reverse bitmap too. Should be called with
 * 'mcam->lock' held.
 */
static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
{
	u16 entry, rentry;

	entry = index;
	rentry = mcam->bmap_entries - index - 1;

	__clear_bit(entry, mcam->bmap);
	__clear_bit(rentry, mcam->bmap_reverse);
	mcam->bmap_fcnt++;
}

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
				      int blkaddr, u16 pcifunc)
{
	u16 index, cntr;

	/* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2pfvf_map[index] == pcifunc) {
			mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
			/* Free the entry in bitmap */
			npc_mcam_clear_bit(mcam, index);
			/* Disable the entry */
			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);

			/* Update entry2counter mapping */
			cntr = mcam->entry2cntr_map[index];
			if (cntr != NPC_MCAM_INVALID_MAP)
				npc_unmap_mcam_entry_and_cntr(rvu, mcam,
							      blkaddr, index,
							      cntr);
		}
	}
}

static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
				       u16 pcifunc)
{
	u16 cntr;

	/* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
	for (cntr = 0; cntr < mcam->counters.max; cntr++) {
		if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
			mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
			mcam->cntr_refcnt[cntr] = 0;
			rvu_free_rsrc(&mcam->counters, cntr);
			/* This API is expected to be called after freeing
			 * MCAM entries, which in turn will remove
			 * 'entry to counter' mapping.
			 * No need to do it again.
			 */
		}
	}
}

/* Find area of contiguous free entries of size 'nr'.
 * If not found return max contiguous free entries available.
 */
static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
				   u16 nr, u16 *max_area)
{
	u16 max_area_start = 0;
	u16 index, next, end;

	*max_area = 0;

again:
	index = find_next_zero_bit(map, size, start);
	if (index >= size)
		return max_area_start;

	end = ((index + nr) >= size) ? size : index + nr;
	next = find_next_bit(map, end, index);
	if (*max_area < (next - index)) {
		*max_area = next - index;
		max_area_start = index;
	}

	if (next < end) {
		start = next + 1;
		goto again;
	}

	return max_area_start;
}

/* Find number of free MCAM entries available
 * within range i.e in between 'start' and 'end'.
 */
static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
{
	u16 index, next;
	u16 fcnt = 0;

again:
	if (start >= end)
		return fcnt;

	index = find_next_zero_bit(map, end, start);
	if (index >= end)
		return fcnt;

	next = find_next_bit(map, end, index);
	if (next <= end) {
		fcnt += next - index;
		start = next + 1;
		goto again;
	}

	fcnt += end - index;
	return fcnt;
}

static void
npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
				   struct npc_mcam_alloc_entry_req *req,
				   u16 *start, u16 *end, bool *reverse)
{
	u16 fcnt;

	if (req->priority == NPC_MCAM_HIGHER_PRIO)
		goto hprio;

	/* For a low priority entry allocation
	 * - If reference entry is not in hprio zone then
	 *      search range: ref_entry to end.
	 * - If reference entry is in hprio zone and if
	 *      request can be accommodated in non-hprio zone then
	 *      search range: 'start of middle zone' to 'end'
	 * - else search in reverse, so that fewer hprio
	 *      zone entries are allocated.
	 */

	*reverse = false;
	*start = req->ref_entry + 1;
	*end = mcam->bmap_entries;

	if (req->ref_entry >= mcam->hprio_end)
		return;

	fcnt = npc_mcam_get_free_count(mcam->bmap,
				       mcam->hprio_end, mcam->bmap_entries);
	if (fcnt > req->count)
		*start = mcam->hprio_end;
	else
		*reverse = true;
	return;

hprio:
	/* For a high priority entry allocation, search is always
	 * in reverse to preserve hprio zone entries.
	 * - If reference entry is not in lprio zone then
	 *      search range: 0 to ref_entry.
	 * - If reference entry is in lprio zone and if
	 *      request can be accommodated in middle zone then
	 *      search range: 'hprio_end' to 'lprio_start'
	 */

	*reverse = true;
	*start = 0;
	*end = req->ref_entry;

	if (req->ref_entry <= mcam->lprio_start)
		return;

	fcnt = npc_mcam_get_free_count(mcam->bmap,
				       mcam->hprio_end, mcam->lprio_start);
	if (fcnt < req->count)
		return;
	*start = mcam->hprio_end;
	*end = mcam->lprio_start;
}

static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
				  struct npc_mcam_alloc_entry_req *req,
				  struct npc_mcam_alloc_entry_rsp *rsp)
{
	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
	u16 fcnt, hp_fcnt, lp_fcnt;
	u16 start, end, index;
	int entry, next_start;
	bool reverse = false;
	unsigned long *bmap;
	u16 max_contig;

	mutex_lock(&mcam->lock);

	/* Check if there are any free entries */
	if (!mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return NPC_MCAM_ALLOC_FAILED;
	}

	/* MCAM entries are divided into high priority, middle and
	 * low priority zones. Idea is to not allocate top and lower
	 * most entries as much as possible, this is to increase
	 * probability of honouring priority allocation requests.
	 *
	 * Two bitmaps are used for mcam entry management,
	 * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
	 * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
	 *
	 * Reverse bitmap is used to allocate entries
	 * - when a higher priority entry is requested
	 * - when available free entries are less.
	 * Lower priority ones out of available free entries are always
	 * chosen when 'high vs low' question arises.
	 */

	/* Get the search range for priority allocation request */
	if (req->priority) {
		npc_get_mcam_search_range_priority(mcam, req,
						   &start, &end, &reverse);
		goto alloc;
	}

	/* Find out the search range for non-priority allocation request
	 *
	 * Get MCAM free entry count in middle zone.
	 */
	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
					  mcam->lprio_start,
					  mcam->bmap_entries);
	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;

	/* Check if request can be accommodated in the middle zone */
	if (fcnt > req->count) {
		start = mcam->hprio_end;
		end = mcam->lprio_start;
	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
		/* Expand search zone from half of hprio zone to
		 * half of lprio zone.
		 */
		start = mcam->hprio_end / 2;
		end = mcam->bmap_entries - (mcam->lprio_count / 2);
		reverse = true;
	} else {
		/* Not enough free entries, search all entries in reverse,
		 * so that low priority ones will get used up.
		 */
		reverse = true;
		start = 0;
		end = mcam->bmap_entries;
	}

alloc:
	if (reverse) {
		bmap = mcam->bmap_reverse;
		start = mcam->bmap_entries - start;
		end = mcam->bmap_entries - end;
		index = start;
		start = end;
		end = index;
	} else {
		bmap = mcam->bmap;
	}

	if (req->contig) {
		/* Allocate requested number of contiguous entries, if
		 * unsuccessful find max contiguous entries available.
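		 * npc_mcam_find_zero_area() returns the start of the largest
		 * free run found (capped at req->count) and stores that run's
		 * length in 'max_contig'.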
		 */
		index = npc_mcam_find_zero_area(bmap, end, start,
						req->count, &max_contig);
		rsp->count = max_contig;
		if (reverse)
			rsp->entry = mcam->bmap_entries - index - max_contig;
		else
			rsp->entry = index;
	} else {
		/* Allocate requested number of non-contiguous entries,
		 * if unsuccessful allocate as many as possible.
		 */
		rsp->count = 0;
		next_start = start;
		for (entry = 0; entry < req->count; entry++) {
			index = find_next_zero_bit(bmap, end, next_start);
			if (index >= end)
				break;

			next_start = start + (index - start) + 1;

			/* Save the entry's index */
			if (reverse)
				index = mcam->bmap_entries - index - 1;
			entry_list[entry] = index;
			rsp->count++;
		}
	}

	/* If allocating requested no of entries is unsuccessful,
	 * expand the search range to full bitmap length and retry.
	 */
	if (!req->priority && (rsp->count < req->count) &&
	    ((end - start) != mcam->bmap_entries)) {
		reverse = true;
		start = 0;
		end = mcam->bmap_entries;
		goto alloc;
	}

	/* For priority entry allocation requests, if allocation is
	 * failed then expand search to max possible range and retry.
	 */
	if (req->priority && rsp->count < req->count) {
		if (req->priority == NPC_MCAM_LOWER_PRIO &&
		    (start != (req->ref_entry + 1))) {
			start = req->ref_entry + 1;
			end = mcam->bmap_entries;
			reverse = false;
			goto alloc;
		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
			   ((end - start) != req->ref_entry)) {
			start = 0;
			end = req->ref_entry;
			reverse = true;
			goto alloc;
		}
	}

	/* Copy MCAM entry indices into mbox response entry_list.
	 * Requester always expects indices in ascending order, so
	 * reverse the list if reverse bitmap is used for allocation.
	 */
	if (!req->contig && rsp->count) {
		index = 0;
		for (entry = rsp->count - 1; entry >= 0; entry--) {
			if (reverse)
				rsp->entry_list[index++] = entry_list[entry];
			else
				rsp->entry_list[entry] = entry_list[entry];
		}
	}

	/* Mark the allocated entries as used and set nixlf mapping */
	for (entry = 0; entry < rsp->count; entry++) {
		index = req->contig ?
			(rsp->entry + entry) : rsp->entry_list[entry];
		npc_mcam_set_bit(mcam, index);
		mcam->entry2pfvf_map[index] = pcifunc;
		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
	}

	/* Update available free count in mbox response */
	rsp->free_count = mcam->bmap_fcnt;

	mutex_unlock(&mcam->lock);
	return 0;
}

int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
					  struct npc_mcam_alloc_entry_req *req,
					  struct npc_mcam_alloc_entry_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	rsp->entry = NPC_MCAM_ENTRY_INVALID;
	rsp->free_count = 0;

	/* Check if ref_entry is within range */
	if (req->priority && req->ref_entry >= mcam->bmap_entries)
		return NPC_MCAM_INVALID_REQ;

	/* ref_entry can't be '0' if requested priority is high.
	 * Can't be last entry if requested priority is low.
	 */
	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
	     req->priority == NPC_MCAM_LOWER_PRIO))
		return NPC_MCAM_INVALID_REQ;

	/* Since list of allocated indices needs to be sent to requester,
	 * max number of non-contiguous entries per mbox msg is limited.
	 */
	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
		return NPC_MCAM_INVALID_REQ;

	/* Alloc request from PFFUNC with no NIXLF attached should be denied */
	if (!is_nixlf_attached(rvu, pcifunc))
		return NPC_MCAM_ALLOC_DENIED;

	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
}

int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
					 struct npc_mcam_free_entry_req *req,
					 struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc = 0;
	u16 cntr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	/* Free request from PFFUNC with no NIXLF attached, ignore */
	if (!is_nixlf_attached(rvu, pcifunc))
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);

	if (req->all)
		goto free_all;

	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	if (rc)
		goto exit;

	mcam->entry2pfvf_map[req->entry] = 0;
	npc_mcam_clear_bit(mcam, req->entry);
	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);

	/* Update entry2counter mapping */
	cntr = mcam->entry2cntr_map[req->entry];
	if (cntr != NPC_MCAM_INVALID_MAP)
		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      req->entry, cntr);

	goto exit;

free_all:
	/* Free up all entries allocated to requesting PFFUNC */
	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
exit:
	mutex_unlock(&mcam->lock);
	return rc;
}

int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
					  struct npc_mcam_write_entry_req *req,
					  struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	if (rc)
		goto exit;

	if (req->set_cntr &&
	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
		rc = NPC_MCAM_INVALID_REQ;
		goto exit;
	}

	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
		rc = NPC_MCAM_INVALID_REQ;
		goto exit;
	}

	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
			      &req->entry_data, req->enable_entry);

	if (req->set_cntr)
		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					    req->entry, req->cntr);

	rc = 0;
exit:
	mutex_unlock(&mcam->lock);
	return rc;
}

int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	mutex_unlock(&mcam->lock);
int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	mutex_unlock(&mcam->lock);
	if (rc)
		return rc;

	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);

	return 0;
}

int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	mutex_unlock(&mcam->lock);
	if (rc)
		return rc;

	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);

	return 0;
}

int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
					  struct npc_mcam_shift_entry_req *req,
					  struct npc_mcam_shift_entry_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	u16 old_entry, new_entry;
	u16 index, cntr;
	int blkaddr, rc = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	for (index = 0; index < req->shift_count; index++) {
		old_entry = req->curr_entry[index];
		new_entry = req->new_entry[index];

		/* Check if both old and new entries are valid and
		 * belong to this PFFUNC or not.
		 */
		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
		if (rc)
			break;

		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
		if (rc)
			break;

		/* new_entry should not have a counter mapped */
		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
			rc = NPC_MCAM_PERM_DENIED;
			break;
		}

		/* Disable the new_entry */
		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);

		/* Copy rule from old entry to new entry */
		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);

		/* Copy counter mapping, if any */
		cntr = mcam->entry2cntr_map[old_entry];
		if (cntr != NPC_MCAM_INVALID_MAP) {
			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
						      old_entry, cntr);
			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
						    new_entry, cntr);
		}

		/* Enable new_entry and disable old_entry */
		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
	}

	/* If shift has failed then report the failed index */
	if (index != req->shift_count) {
		rc = NPC_MCAM_PERM_DENIED;
		rsp->failed_entry_idx = index;
	}

	mutex_unlock(&mcam->lock);
	return rc;
}
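/* Allocate match-stat counters for the requesting PFFUNC. As with entry
 * allocation, a contiguous request falls back to the largest contiguous
 * run available, while a non-contiguous request hands out as many
 * counters as possible and returns their IDs in cntr_list.
 */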
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
			struct npc_mcam_alloc_counter_req *req,
			struct npc_mcam_alloc_counter_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	u16 max_contig, cntr;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
	if (!is_nixlf_attached(rvu, pcifunc))
		return NPC_MCAM_INVALID_REQ;

	/* Since list of allocated counter IDs needs to be sent to requester,
	 * max number of non-contiguous counters per mbox msg is limited.
	 */
	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);

	/* Check if unused counters are available or not */
	if (!rvu_rsrc_free_count(&mcam->counters)) {
		mutex_unlock(&mcam->lock);
		return NPC_MCAM_ALLOC_FAILED;
	}

	rsp->count = 0;

	if (req->contig) {
		/* Allocate requested number of contiguous counters, if
		 * unsuccessful find max contiguous entries available.
		 */
		index = npc_mcam_find_zero_area(mcam->counters.bmap,
						mcam->counters.max, 0,
						req->count, &max_contig);
		rsp->count = max_contig;
		rsp->cntr = index;
		for (cntr = index; cntr < (index + max_contig); cntr++) {
			__set_bit(cntr, mcam->counters.bmap);
			mcam->cntr2pfvf_map[cntr] = pcifunc;
		}
	} else {
		/* Allocate requested number of non-contiguous counters,
		 * if unsuccessful allocate as many as possible.
		 */
		for (cntr = 0; cntr < req->count; cntr++) {
			index = rvu_alloc_rsrc(&mcam->counters);
			if (index < 0)
				break;
			rsp->cntr_list[cntr] = index;
			rsp->count++;
			mcam->cntr2pfvf_map[index] = pcifunc;
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 index, entry = 0;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	if (err) {
		mutex_unlock(&mcam->lock);
		return err;
	}

	/* Mark counter as free/unused */
	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
	rvu_free_rsrc(&mcam->counters, req->cntr);

	/* Disable all MCAM entry's stats which are using this counter */
	while (entry < mcam->bmap_entries) {
		if (!mcam->cntr_refcnt[req->cntr])
			break;

		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
		if (index >= mcam->bmap_entries)
			break;
		/* Advance past this entry before the 'continue' below so
		 * the scan always makes forward progress.
		 */
		entry = index + 1;
		if (mcam->entry2cntr_map[index] != req->cntr)
			continue;

		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      index, req->cntr);
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
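/* Detach a counter from MCAM entries without freeing the counter itself.
 * If req->all is set, every MCAM entry currently mapped to this counter
 * is unmapped; otherwise only the given entry/counter pair is unmapped.
 */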
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 index, entry = 0;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	if (rc)
		goto exit;

	/* Unmap the MCAM entry and counter */
	if (!req->all) {
		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
		if (rc)
			goto exit;
		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      req->entry, req->cntr);
		goto exit;
	}

	/* Disable all MCAM entry's stats which are using this counter */
	while (entry < mcam->bmap_entries) {
		if (!mcam->cntr_refcnt[req->cntr])
			break;

		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
		if (index >= mcam->bmap_entries)
			break;
		/* Advance past this entry before the 'continue' below so
		 * the scan always makes forward progress.
		 */
		entry = index + 1;
		if (mcam->entry2cntr_map[index] != req->cntr)
			continue;

		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      index, req->cntr);
	}
exit:
	mutex_unlock(&mcam->lock);
	return rc;
}

int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	mutex_unlock(&mcam->lock);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);

	return 0;
}

int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
			struct npc_mcam_oper_counter_req *req,
			struct npc_mcam_oper_counter_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	mutex_unlock(&mcam->lock);
	if (err)
		return err;

	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
	rsp->stat &= BIT_ULL(48) - 1;

	return 0;
}

int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
			struct npc_mcam_alloc_and_write_entry_req *req,
			struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req;
	struct npc_mcam_alloc_counter_rsp cntr_rsp;
	struct npc_mcam_alloc_entry_req entry_req;
	struct npc_mcam_alloc_entry_rsp entry_rsp;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 entry = NPC_MCAM_ENTRY_INVALID;
	u16 cntr = NPC_MCAM_ENTRY_INVALID;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
		return NPC_MCAM_INVALID_REQ;

	/* Try to allocate a MCAM entry */
	entry_req.hdr.pcifunc = req->hdr.pcifunc;
	entry_req.contig = true;
	entry_req.priority = req->priority;
	entry_req.ref_entry = req->ref_entry;
	entry_req.count = 1;

	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
						   &entry_req, &entry_rsp);
	if (rc)
		return rc;

	if (!entry_rsp.count)
		return NPC_MCAM_ALLOC_FAILED;

	entry = entry_rsp.entry;

	if (!req->alloc_cntr)
		goto write_entry;

	/* Now allocate counter */
	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
	if (rc) {
		/* Free allocated MCAM entry */
		mutex_lock(&mcam->lock);
		mcam->entry2pfvf_map[entry] = 0;
		npc_mcam_clear_bit(mcam, entry);
		mutex_unlock(&mcam->lock);
		return rc;
	}

	cntr = cntr_rsp.cntr;

write_entry:
	mutex_lock(&mcam->lock);
	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
			      &req->entry_data, req->enable_entry);

	if (req->alloc_cntr)
		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
	mutex_unlock(&mcam->lock);

	rsp->entry = entry;
	rsp->cntr = cntr;

	return 0;
}
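/* Helper macros to read the key extraction (KEX) configuration registers
 * that the get_kex_cfg mbox handler below returns to the requester, along
 * with the name of the loaded MKEX profile.
 */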
#define GET_KEX_CFG(intf) \
	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))

#define GET_KEX_FLAGS(ld) \
	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))

#define GET_KEX_LD(intf, lid, lt, ld)	\
	rvu_read64(rvu, BLKADDR_NPC,	\
		   NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))

#define GET_KEX_LDFLAGS(intf, ld, fl)	\
	rvu_read64(rvu, BLKADDR_NPC,	\
		   NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))

int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
				     struct npc_get_kex_cfg_rsp *rsp)
{
	int lid, lt, ld, fl;

	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
			}
		}
	}
	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);

	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
				GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
				GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
		}
	}
	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
	return 0;
}

int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;
	bool enable;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (!pfvf->rxvlan)
		return 0;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_UCAST_ENTRY);
	pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
	enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
	npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
			      NIX_INTF_RX, &pfvf->entry, enable);

	return 0;
}