// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"

#define RSVD_MCAM_ENTRIES_PER_PF	2 /* Bcast & Promisc */
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1 /* Ucast for LFs */

#define NIXLF_UCAST_ENTRY	0
#define NIXLF_BCAST_ENTRY	1
#define NIXLF_PROMISC_ENTRY	2

#define NPC_PARSE_RESULT_DMAC_OFFSET	8

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
                                      int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
                                       u16 pcifunc);

void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
        int blkaddr;
        u64 val = 0;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Config CPI base for the PKIND */
        val = pkind | 1ULL << 62;
        rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
}

int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        u32 map;
        int i;

        for (i = 0; i < pkind->rsrc.max; i++) {
                map = pkind->pfchan_map[i];
                if (((map >> 16) & 0x3F) == pf)
                        return i;
        }
        return -1;
}

static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
                                    u16 pcifunc, int nixlf, int type)
{
        int pf = rvu_get_pf(pcifunc);
        int index;

        /* Check if this is for a PF */
        if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
                /* Reserved entries exclude PF0 */
                pf--;
                index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
                /* Broadcast address matching entry should be first so
                 * that the packet can be replicated to all VFs.
                 */
                if (type == NIXLF_BCAST_ENTRY)
                        return index;
                else if (type == NIXLF_PROMISC_ENTRY)
                        return index + 1;
        }

        return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
}

static int npc_get_bank(struct npc_mcam *mcam, int index)
{
        int bank = index / mcam->banksize;

        /* 0,1 & 2,3 banks are combined for this keysize */
        if (mcam->keysize == NPC_MCAM_KEY_X2)
                return bank ? 2 : 0;

        return bank;
}

static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
                                  int blkaddr, int index)
{
        int bank = npc_get_bank(mcam, index);
        u64 cfg;

        index &= (mcam->banksize - 1);
        cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
        return (cfg & 1);
}

static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                                  int blkaddr, int index, bool enable)
{
        int bank = npc_get_bank(mcam, index);
        int actbank = bank;

        index &= (mcam->banksize - 1);
        for (; bank < (actbank + mcam->banks_per_entry); bank++) {
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CFG(index, bank),
                            enable ? 1 : 0);
        }
}
static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                                 int blkaddr, int index)
{
        int bank = npc_get_bank(mcam, index);
        int actbank = bank;

        index &= (mcam->banksize - 1);
        for (; bank < (actbank + mcam->banks_per_entry); bank++) {
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);

                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);

                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
        }
}

static void npc_get_keyword(struct mcam_entry *entry, int idx,
                            u64 *cam0, u64 *cam1)
{
        u64 kw_mask = 0x00;

#define CAM_MASK(n)	(BIT_ULL(n) - 1)

        /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
         * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
         *
         * Also, only 48 bits of BANKX_CAMX_W1 are valid.
         */
        switch (idx) {
        case 0:
                /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
                *cam1 = entry->kw[0];
                kw_mask = entry->kw_mask[0];
                break;
        case 1:
                /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
                *cam1 = entry->kw[1] & CAM_MASK(48);
                kw_mask = entry->kw_mask[1] & CAM_MASK(48);
                break;
        case 2:
                /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
                 * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
                 */
                *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
                *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
                kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
                kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
                break;
        case 3:
                /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
                 * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
                 */
                *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
                *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
                kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
                kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
                break;
        case 4:
                /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
                 * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
                 */
                *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
                *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
                kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
                kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
                break;
        case 5:
                /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
                 * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
                 */
                *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
                *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
                kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
                kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
                break;
        case 6:
                /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
                 * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
                 */
                *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
                *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
                kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
                kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
                break;
        case 7:
                /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
                *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
                kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
                break;
        }

        *cam1 &= kw_mask;
        *cam0 = ~*cam1 & kw_mask;
}
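/* Illustrative example (not from the hardware manual): for a key word
 * kw = 0xA (1010b) with kw_mask = 0xF, npc_get_keyword() returns
 * cam1 = 1010b and cam0 = 0101b, so every masked bit must match the
 * key exactly. A mask bit of zero leaves both cam0 and cam1 clear,
 * which the TCAM treats as don't-care for that bit.
 */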
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                                  int blkaddr, int index, u8 intf,
                                  struct mcam_entry *entry, bool enable)
{
        int bank = npc_get_bank(mcam, index);
        int kw = 0, actbank, actindex;
        u64 cam0, cam1;

        actbank = bank; /* Save bank id, to set action later on */
        actindex = index;
        index &= (mcam->banksize - 1);

        /* Disable before mcam entry update */
        npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);

        /* Clear mcam entry to avoid writes being suppressed by NPC */
        npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);

        /* CAM1 takes the comparison value and
         * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
         * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
         * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
         * CAM1<n> = 0 & CAM0<n> = 0 => always match, i.e. dontcare.
         */
        for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
                /* Interface should be set in all banks */
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
                            intf);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
                            ~intf & 0x3);

                /* Set the match key */
                npc_get_keyword(entry, kw, &cam0, &cam1);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);

                npc_get_keyword(entry, kw + 1, &cam0, &cam1);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
        }

        /* Set 'action' */
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);

        /* Set TAG 'action' */
        rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
                    entry->vtag_action);

        /* Enable the entry */
        if (enable)
                npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}

static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                                int blkaddr, u16 src, u16 dest)
{
        int dbank = npc_get_bank(mcam, dest);
        int sbank = npc_get_bank(mcam, src);
        u64 cfg, sreg, dreg;
        int bank, i;

        src &= (mcam->banksize - 1);
        dest &= (mcam->banksize - 1);

        /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
        for (bank = 0; bank < mcam->banks_per_entry; bank++) {
                sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
                dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
                for (i = 0; i < 6; i++) {
                        cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
                        rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
                }
        }

        /* Copy action */
        cfg = rvu_read64(rvu, blkaddr,
                         NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);

        /* Copy TAG action */
        cfg = rvu_read64(rvu, blkaddr,
                         NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);

        /* Enable or disable */
        cfg = rvu_read64(rvu, blkaddr,
                         NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}

static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
                               int blkaddr, int index)
{
        int bank = npc_get_bank(mcam, index);

        index &= (mcam->banksize - 1);
        return rvu_read64(rvu, blkaddr,
                          NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
                                 int nixlf, u64 chan, u8 *mac_addr)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct mcam_entry entry = { {0} };
        struct nix_rx_action action;
        int blkaddr, index, kwi;
        u64 mac = 0;

        /* AF's VFs work in promiscuous mode */
        if (is_afvf(pcifunc))
                return;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);

        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_UCAST_ENTRY);

        /* Match ingress channel and DMAC */
        entry.kw[0] = chan;
        entry.kw_mask[0] = 0xFFFULL;

        kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
        entry.kw[kwi] = mac;
        entry.kw_mask[kwi] = BIT_ULL(48) - 1;

        /* Don't change the action if the entry is already enabled,
         * otherwise the RSS action may get overwritten.
         */
        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
                *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
                                                      blkaddr, index);
        } else {
                *(u64 *)&action = 0x00;
                action.op = NIX_RX_ACTIONOP_UCAST;
                action.pf_func = pcifunc;
        }

        entry.action = *(u64 *)&action;
        npc_config_mcam_entry(rvu, mcam, blkaddr, index,
                              NIX_INTF_RX, &entry, true);

        /* Add VLAN matching, set up the action and save the entry for later */
        entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
        entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;

        entry.vtag_action = VTAG0_VALID_BIT |
                            FIELD_PREP(VTAG0_TYPE_MASK, 0) |
                            FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
                            FIELD_PREP(VTAG0_RELPTR_MASK, 12);

        memcpy(&pfvf->entry, &entry, sizeof(entry));
}

void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
                                   int nixlf, u64 chan, bool allmulti)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, ucast_idx, index, kwi;
        struct mcam_entry entry = { {0} };
        struct nix_rx_action action = { };

        /* Only PF or AF VF can add a promiscuous entry */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
                return;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_PROMISC_ENTRY);

        entry.kw[0] = chan;
        entry.kw_mask[0] = 0xFFFULL;

        if (allmulti) {
                kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
                entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
                entry.kw_mask[kwi] = BIT_ULL(40);
        }

        ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                             nixlf, NIXLF_UCAST_ENTRY);

        /* If the corresponding PF's ucast action is RSS,
         * use the same action for promisc also.
         */
        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
                *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
                                                      blkaddr, ucast_idx);

        if (action.op != NIX_RX_ACTIONOP_RSS) {
                *(u64 *)&action = 0x00;
                action.op = NIX_RX_ACTIONOP_UCAST;
                action.pf_func = pcifunc;
        }

        entry.action = *(u64 *)&action;
        npc_config_mcam_entry(rvu, mcam, blkaddr, index,
                              NIX_INTF_RX, &entry, true);
}
static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
                                     int nixlf, bool enable)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, index;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Only PFs have a promiscuous entry */
        if (pcifunc & RVU_PFVF_FUNC_MASK)
                return;

        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_PROMISC_ENTRY);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
{
        npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
}

void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
{
        npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
}
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
                                       int nixlf, u64 chan)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct mcam_entry entry = { {0} };
        struct rvu_hwinfo *hw = rvu->hw;
        struct nix_rx_action action;
        struct rvu_pfvf *pfvf;
        int blkaddr, index;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Skip LBK VFs */
        if (is_afvf(pcifunc))
                return;

        /* If pkt replication is not supported,
         * then only PF is allowed to add a bcast match entry.
         */
        if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
                return;

        /* Get 'pcifunc' of PF device */
        pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_BCAST_ENTRY);

        /* Match ingress channel */
        entry.kw[0] = chan;
        entry.kw_mask[0] = 0xfffull;

        /* Match broadcast MAC address.
         * DMAC is extracted at 0th bit of PARSE_KEX::KW1
         */
        entry.kw[1] = 0xffffffffffffull;
        entry.kw_mask[1] = 0xffffffffffffull;

        *(u64 *)&action = 0x00;
        if (!hw->cap.nix_rx_multicast) {
                /* Early silicon doesn't support pkt replication,
                 * so install the entry with a UCAST action, so that
                 * the PF receives all broadcast packets.
                 */
                action.op = NIX_RX_ACTIONOP_UCAST;
                action.pf_func = pcifunc;
        } else {
                pfvf = rvu_get_pfvf(rvu, pcifunc);
                action.index = pfvf->bcast_mce_idx;
                action.op = NIX_RX_ACTIONOP_MCAST;
        }

        entry.action = *(u64 *)&action;
        npc_config_mcam_entry(rvu, mcam, blkaddr, index,
                              NIX_INTF_RX, &entry, true);
}

void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, index;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Get 'pcifunc' of PF device */
        pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

        index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}

void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
                                    int group, int alg_idx, int mcam_index)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct nix_rx_action action;
        int blkaddr, index, bank;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Check if this is for reserved default entry */
        if (mcam_index < 0) {
                if (group != DEFAULT_RSS_CONTEXT_GROUP)
                        return;
                index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                                 nixlf, NIXLF_UCAST_ENTRY);
        } else {
                /* TODO: validate this mcam index */
                index = mcam_index;
        }

        if (index >= mcam->total_entries)
                return;

        bank = npc_get_bank(mcam, index);
        index &= (mcam->banksize - 1);

        *(u64 *)&action = rvu_read64(rvu, blkaddr,
                                     NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
        /* Ignore if no action was set earlier */
        if (!*(u64 *)&action)
                return;

        action.op = NIX_RX_ACTIONOP_RSS;
        action.pf_func = pcifunc;
        action.index = group;
        action.flow_key_alg = alg_idx;

        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);

        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_PROMISC_ENTRY);

        /* If the PF's promiscuous entry is enabled,
         * set the RSS action for that entry as well.
         */
        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
                bank = npc_get_bank(mcam, index);
                index &= (mcam->banksize - 1);

                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
                            *(u64 *)&action);
        }

        rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
                                       int nixlf, bool enable)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        struct nix_rx_action action;
        int index, bank, blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Ucast MCAM match entry of this PF/VF */
        index = npc_get_nixlf_mcam_index(mcam, pcifunc,
                                         nixlf, NIXLF_UCAST_ENTRY);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);

        /* For PF, ena/dis promisc and bcast MCAM match entries.
         * For VFs add/delete from bcast list when RX multicast
         * feature is present.
         */
        if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
                return;

        /* For bcast, enable/disable only if its action is not
         * packet replication. In case the action is replication,
         * this PF/VF's nixlf is instead removed from the bcast
         * replication list.
         */
        index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
                                         nixlf, NIXLF_BCAST_ENTRY);
        bank = npc_get_bank(mcam, index);
        *(u64 *)&action = rvu_read64(rvu, blkaddr,
                NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));

        /* VFs will not have BCAST entry */
        if (action.op != NIX_RX_ACTIONOP_MCAST &&
            !(pcifunc & RVU_PFVF_FUNC_MASK)) {
                npc_enable_mcam_entry(rvu, mcam,
                                      blkaddr, index, enable);
        } else {
                nix_update_bcast_mce_list(rvu, pcifunc, enable);
                /* Enable PF's BCAST entry for packet replication */
                rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
        }

        if (enable)
                rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
        else
                rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);

        rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}

void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
        npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
}

void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
        npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}

void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        mutex_lock(&mcam->lock);

        /* Disable and free all MCAM entries mapped to this 'pcifunc' */
        npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);

        /* Free all MCAM counters mapped to this 'pcifunc' */
        npc_mcam_free_all_counters(rvu, mcam, pcifunc);

        mutex_unlock(&mcam->lock);

        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}

#define SET_KEX_LD(intf, lid, ltype, ld, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)

#define SET_KEX_LDFLAGS(intf, ld, flags, cfg)	\
	rvu_write64(rvu, blkaddr,		\
		    NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)

#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)		\
	(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) |		\
	 ((flags_ena) << 6) | ((key_ofs) & 0x3F))
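/* Illustrative expansion (not an additional configuration): the DMAC
 * extractor below, KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, 8), encodes
 * bytesm1 = 5 (extract 6 bytes), header offset 0, extraction enabled,
 * flags disabled and key offset 8, i.e. (5 << 16) | (1 << 7) | 8 = 0x50088.
 */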
static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int lid, ltype;
        int lid_count;
        u64 cfg;

        cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
        lid_count = (cfg >> 4) & 0xF;

        /* First clear any existing config, i.e.
         * disable LDATA and FLAGS extraction.
         */
        for (lid = 0; lid < lid_count; lid++) {
                for (ltype = 0; ltype < 16; ltype++) {
                        SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
                        SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
                        SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
                        SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);

                        SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
                        SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
                        SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
                        SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
                }
        }

        if (mcam->keysize != NPC_MCAM_KEY_X2)
                return;

        /* Default MCAM KEX profile */
        /* Layer A: Ethernet: */

        /* DMAC: 6 bytes, KW1[47:0] */
        cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);

        /* Ethertype: 2 bytes, KW0[47:32] */
        cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);

        /* Layer B: Single VLAN (CTAG) */
        /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
        cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);

        /* Layer B: Stacked VLAN (STAG|QinQ) */
        /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
        cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);

        /* Layer C: IPv4 */
        /* SIP+DIP: 8 bytes, KW2[63:0] */
        cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
        /* TOS: 1 byte, KW1[63:56] */
        cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);

        /* Layer D: UDP */
        /* SPORT: 2 bytes, KW3[15:0] */
        cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
        /* DPORT: 2 bytes, KW3[31:16] */
        cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);

        /* Layer D: TCP */
        /* SPORT: 2 bytes, KW3[15:0] */
        cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
        /* DPORT: 2 bytes, KW3[31:16] */
        cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
}

static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
                                     struct npc_mcam_kex *mkex)
{
        int lid, lt, ld, fl;

        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
                    mkex->keyx_cfg[NIX_INTF_RX]);
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
                    mkex->keyx_cfg[NIX_INTF_TX]);

        for (ld = 0; ld < NPC_MAX_LD; ld++)
                rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
                            mkex->kex_ld_flags[ld]);

        for (lid = 0; lid < NPC_MAX_LID; lid++) {
                for (lt = 0; lt < NPC_MAX_LT; lt++) {
                        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                                SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
                                           mkex->intf_lid_lt_ld[NIX_INTF_RX]
                                           [lid][lt][ld]);

                                SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
                                           mkex->intf_lid_lt_ld[NIX_INTF_TX]
                                           [lid][lt][ld]);
                        }
                }
        }

        for (ld = 0; ld < NPC_MAX_LD; ld++) {
                for (fl = 0; fl < NPC_MAX_LFL; fl++) {
                        SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
                                        mkex->intf_ld_flags[NIX_INTF_RX]
                                        [ld][fl]);

                        SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
                                        mkex->intf_ld_flags[NIX_INTF_TX]
                                        [ld][fl]);
                }
        }
}
/* strtoull of "mkexprof" with base:36 */
#define MKEX_SIGN	0x19bbfdbd15f
#define MKEX_END_SIGN	0xdeadbeef

static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
{
        const char *mkex_profile = rvu->mkex_pfl_name;
        struct device *dev = &rvu->pdev->dev;
        void __iomem *mkex_prfl_addr = NULL;
        struct npc_mcam_kex *mcam_kex;
        u64 prfl_addr;
        u64 prfl_sz;

        /* If the user hasn't selected an mkex profile, use the default one */
        if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
                goto load_default;

        if (!rvu->fwdata)
                goto load_default;
        prfl_addr = rvu->fwdata->mcam_addr;
        prfl_sz = rvu->fwdata->mcam_sz;

        if (!prfl_addr || !prfl_sz)
                goto load_default;

        mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
        if (!mkex_prfl_addr)
                goto load_default;

        mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;

        while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
                /* Compare with mkex mod_param name string */
                if (mcam_kex->mkex_sign == MKEX_SIGN &&
                    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
                        /* Due to an erratum (35786) in A0/B0 pass silicon,
                         * parse nibble enable configuration has to be
                         * identical for both Rx and Tx interfaces.
                         */
                        if (is_rvu_96xx_B0(rvu) &&
                            mcam_kex->keyx_cfg[NIX_INTF_RX] !=
                            mcam_kex->keyx_cfg[NIX_INTF_TX])
                                goto load_default;

                        /* Program selected mkex profile */
                        npc_program_mkex_profile(rvu, blkaddr, mcam_kex);

                        goto unmap;
                }

                mcam_kex++;
                prfl_sz -= sizeof(struct npc_mcam_kex);
        }
        dev_warn(dev, "Failed to load requested profile: %s\n",
                 rvu->mkex_pfl_name);

load_default:
        dev_info(rvu->dev, "Using default mkex profile\n");
        /* Config packet data and flags extraction into PARSE result */
        npc_config_ldata_extract(rvu, blkaddr);

unmap:
        if (mkex_prfl_addr)
                iounmap(mkex_prfl_addr);
}

static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
                                 struct npc_kpu_profile_action *kpuaction,
                                 int kpu, int entry, bool pkind)
{
        struct npc_kpu_action0 action0 = {0};
        struct npc_kpu_action1 action1 = {0};
        u64 reg;

        action1.errlev = kpuaction->errlev;
        action1.errcode = kpuaction->errcode;
        action1.dp0_offset = kpuaction->dp0_offset;
        action1.dp1_offset = kpuaction->dp1_offset;
        action1.dp2_offset = kpuaction->dp2_offset;

        if (pkind)
                reg = NPC_AF_PKINDX_ACTION1(entry);
        else
                reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);

        rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);

        action0.byp_count = kpuaction->bypass_count;
        action0.capture_ena = kpuaction->cap_ena;
        action0.parse_done = kpuaction->parse_done;
        action0.next_state = kpuaction->next_state;
        action0.capture_lid = kpuaction->lid;
        action0.capture_ltype = kpuaction->ltype;
        action0.capture_flags = kpuaction->flags;
        action0.ptr_advance = kpuaction->ptr_advance;
        action0.var_len_offset = kpuaction->offset;
        action0.var_len_mask = kpuaction->mask;
        action0.var_len_right = kpuaction->right;
        action0.var_len_shift = kpuaction->shift;

        if (pkind)
                reg = NPC_AF_PKINDX_ACTION0(entry);
        else
                reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);

        rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
}
static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
                              struct npc_kpu_profile_cam *kpucam,
                              int kpu, int entry)
{
        struct npc_kpu_cam cam0 = {0};
        struct npc_kpu_cam cam1 = {0};

        cam1.state = kpucam->state & kpucam->state_mask;
        cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
        cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
        cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;

        cam0.state = ~kpucam->state & kpucam->state_mask;
        cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
        cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
        cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;

        rvu_write64(rvu, blkaddr,
                    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
        rvu_write64(rvu, blkaddr,
                    NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
}

static inline u64 enable_mask(int count)
{
        return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
}

static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
                                    struct npc_kpu_profile *profile)
{
        int entry, num_entries, max_entries;

        if (profile->cam_entries != profile->action_entries) {
                dev_err(rvu->dev,
                        "KPU%d: CAM and action entries [%d != %d] not equal\n",
                        kpu, profile->cam_entries, profile->action_entries);
        }

        max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;

        /* Program CAM match entries for previous KPU extracted data */
        num_entries = min_t(int, profile->cam_entries, max_entries);
        for (entry = 0; entry < num_entries; entry++)
                npc_config_kpucam(rvu, blkaddr,
                                  &profile->cam[entry], kpu, entry);

        /* Program this KPU's actions */
        num_entries = min_t(int, profile->action_entries, max_entries);
        for (entry = 0; entry < num_entries; entry++)
                npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
                                     kpu, entry, false);

        /* Enable all programmed entries */
        num_entries = min_t(int, profile->action_entries, profile->cam_entries);
        rvu_write64(rvu, blkaddr,
                    NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
        if (num_entries > 64) {
                rvu_write64(rvu, blkaddr,
                            NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
                            enable_mask(num_entries - 64));
        }

        /* Enable this KPU */
        rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}
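/* Illustrative note (derived from enable_mask() above): the value written
 * to NPC_AF_KPUX_ENTRY_DISX is a disable mask, so enable_mask(3) returns
 * ~(BIT_ULL(3) - 1), i.e. bits 3..63 set, leaving only entries 0-2 of that
 * 64-entry group enabled.
 */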
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int num_pkinds, num_kpus, idx;
        struct npc_pkind *pkind;

        /* Get HW limits */
        hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;

        /* Disable all KPUs and their entries */
        for (idx = 0; idx < hw->npc_kpus; idx++) {
                rvu_write64(rvu, blkaddr,
                            NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
                rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
        }

        /* First program the IKPU profile, i.e. PKIND configs.
         * Check HW max count to avoid configuring junk or
         * writing to unsupported CSR addresses.
         */
        pkind = &hw->pkind;
        num_pkinds = ARRAY_SIZE(ikpu_action_entries);
        num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);

        for (idx = 0; idx < num_pkinds; idx++)
                npc_config_kpuaction(rvu, blkaddr,
                                     &ikpu_action_entries[idx], 0, idx, true);

        /* Program KPU CAM and Action profiles */
        num_kpus = ARRAY_SIZE(npc_kpu_profiles);
        num_kpus = min_t(int, hw->npc_kpus, num_kpus);

        for (idx = 0; idx < num_kpus; idx++)
                npc_program_kpu_profile(rvu, blkaddr,
                                        idx, &npc_kpu_profiles[idx]);
}

static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
        int nixlf_count = rvu_get_nixlf_count(rvu);
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int rsvd, err;
        u64 cfg;

        /* Get HW limits */
        cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
        mcam->banks = (cfg >> 44) & 0xF;
        mcam->banksize = (cfg >> 28) & 0xFFFF;
        mcam->counters.max = (cfg >> 48) & 0xFFFF;

        /* Actual number of MCAM entries varies with entry size */
        cfg = (rvu_read64(rvu, blkaddr,
                          NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
        mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
        mcam->keysize = cfg;

        /* Number of banks combined per MCAM entry */
        if (cfg == NPC_MCAM_KEY_X4)
                mcam->banks_per_entry = 4;
        else if (cfg == NPC_MCAM_KEY_X2)
                mcam->banks_per_entry = 2;
        else
                mcam->banks_per_entry = 1;

        /* Reserve one MCAM entry for each NIX LF to guarantee space
         * to install the default matching DMAC rule.
         * Also reserve 2 MCAM entries for each PF for default
         * channel based matching or 'bcast & promisc' matching to
         * support BCAST and PROMISC modes of operation for PFs.
         * PF0 is excluded.
         */
        rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
                ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
        if (mcam->total_entries <= rsvd) {
                dev_warn(rvu->dev,
                         "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
                         mcam->total_entries);
                return -ENOMEM;
        }

        mcam->bmap_entries = mcam->total_entries - rsvd;
        mcam->nixlf_offset = mcam->bmap_entries;
        mcam->pf_offset = mcam->nixlf_offset + nixlf_count;

        /* Allocate bitmaps for managing MCAM entries */
        mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
                                  sizeof(long), GFP_KERNEL);
        if (!mcam->bmap)
                return -ENOMEM;

        mcam->bmap_reverse = devm_kcalloc(rvu->dev,
                                          BITS_TO_LONGS(mcam->bmap_entries),
                                          sizeof(long), GFP_KERNEL);
        if (!mcam->bmap_reverse)
                return -ENOMEM;

        mcam->bmap_fcnt = mcam->bmap_entries;

        /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
        mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
                                            sizeof(u16), GFP_KERNEL);
        if (!mcam->entry2pfvf_map)
                return -ENOMEM;

        /* Reserve 1/8th of MCAM entries at the bottom for low priority
         * allocations and another 1/8th at the top for high priority
         * allocations.
         */
        mcam->lprio_count = mcam->bmap_entries / 8;
        if (mcam->lprio_count > BITS_PER_LONG)
                mcam->lprio_count = round_down(mcam->lprio_count,
                                               BITS_PER_LONG);
        mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
        mcam->hprio_count = mcam->lprio_count;
        mcam->hprio_end = mcam->hprio_count;
        /* Reserve the last counter for the MCAM RX miss action, which is set
         * to drop the packet. This way we will know how many packets didn't
         * match any MCAM entry.
         */
        mcam->counters.max--;
        mcam->rx_miss_act_cntr = mcam->counters.max;

        /* Allocate bitmap for managing MCAM counters and memory
         * for saving counter to RVU PFFUNC allocation mapping.
         */
        err = rvu_alloc_bitmap(&mcam->counters);
        if (err)
                return err;

        mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
                                           sizeof(u16), GFP_KERNEL);
        if (!mcam->cntr2pfvf_map)
                goto free_mem;

        /* Alloc memory for MCAM entry to counter mapping and for tracking
         * counter's reference count.
         */
        mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
                                            sizeof(u16), GFP_KERNEL);
        if (!mcam->entry2cntr_map)
                goto free_mem;

        mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
                                         sizeof(u16), GFP_KERNEL);
        if (!mcam->cntr_refcnt)
                goto free_mem;

        mutex_init(&mcam->lock);

        return 0;

free_mem:
        kfree(mcam->counters.bmap);
        return -ENOMEM;
}

int rvu_npc_init(struct rvu *rvu)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u64 keyz = NPC_MCAM_KEY_X2;
        int blkaddr, entry, bank, err;
        u64 cfg, nibble_ena;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0) {
                dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
                return -ENODEV;
        }

        /* First disable all MCAM entries, to stop traffic towards NIXLFs */
        cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
        for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
                for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
                        rvu_write64(rvu, blkaddr,
                                    NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
        }

        /* Allocate resource bitmap for pkind */
        pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
                                      NPC_AF_CONST1) >> 12) & 0xFF;
        err = rvu_alloc_bitmap(&pkind->rsrc);
        if (err)
                return err;

        /* Allocate mem for pkind to PF and channel mapping info */
        pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
                                         sizeof(u32), GFP_KERNEL);
        if (!pkind->pfchan_map)
                return -ENOMEM;

        /* Configure KPU profile */
        npc_parser_profile_init(rvu, blkaddr);

        /* Config Outer L2, IPv4's NPC layer info */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
                    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
                    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);

        /* Config Inner IPV4 NPC layer info */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
                    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);

        /* Enable below for Rx pkts.
         * - Outer IPv4 header checksum validation.
         * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
         * - Inner IPv4 header checksum validation.
         * - Set non zero checksum error code value
         */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
                    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
                    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
                    BIT_ULL(2) | BIT_ULL(1));
        /* Set RX and TX side MCAM search key size.
         * LA..LD (ltype only) + Channel
         */
        nibble_ena = 0x49247;
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
                    ((keyz & 0x3) << 32) | nibble_ena);
        /* Due to an erratum (35786) in A0 pass silicon, parse nibble enable
         * configuration has to be identical for both Rx and Tx interfaces.
         */
        if (!is_rvu_96xx_B0(rvu))
                nibble_ena = (1ULL << 19) - 1;
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
                    ((keyz & 0x3) << 32) | nibble_ena);

        err = npc_mcam_rsrcs_init(rvu, blkaddr);
        if (err)
                return err;

        /* Configure MKEX profile */
        npc_load_mkex_profile(rvu, blkaddr);

        /* Set TX miss action to UCAST_DEFAULT, i.e. transmit the packet
         * on the NIX LF SQ's default channel.
         */
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
                    NIX_TX_ACTIONOP_UCAST_DEFAULT);

        /* If MCAM lookup doesn't result in a match, drop the received packet.
         * And map this action to a counter to count dropped pkts.
         */
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
                    NIX_RX_ACTIONOP_DROP);
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
                    BIT_ULL(9) | mcam->rx_miss_act_cntr);

        return 0;
}

void rvu_npc_freemem(struct rvu *rvu)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        struct npc_mcam *mcam = &rvu->hw->mcam;

        kfree(pkind->rsrc.bmap);
        kfree(mcam->counters.bmap);
        mutex_destroy(&mcam->lock);
}

void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
                                       int blkaddr, int *alloc_cnt,
                                       int *enable_cnt)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int entry;

        *alloc_cnt = 0;
        *enable_cnt = 0;

        for (entry = 0; entry < mcam->bmap_entries; entry++) {
                if (mcam->entry2pfvf_map[entry] == pcifunc) {
                        (*alloc_cnt)++;
                        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
                                (*enable_cnt)++;
                }
        }
}

void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
                                         int blkaddr, int *alloc_cnt,
                                         int *enable_cnt)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int cntr;

        *alloc_cnt = 0;
        *enable_cnt = 0;

        for (cntr = 0; cntr < mcam->counters.max; cntr++) {
                if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
                        (*alloc_cnt)++;
                        if (mcam->cntr_refcnt[cntr])
                                (*enable_cnt)++;
                }
        }
}

static int npc_mcam_verify_entry(struct npc_mcam *mcam,
                                 u16 pcifunc, int entry)
{
        /* Verify if entry is valid and if it is indeed
         * allocated to the requesting PFFUNC.
         */
        if (entry >= mcam->bmap_entries)
                return NPC_MCAM_INVALID_REQ;

        if (pcifunc != mcam->entry2pfvf_map[entry])
                return NPC_MCAM_PERM_DENIED;

        return 0;
}
static int npc_mcam_verify_counter(struct npc_mcam *mcam,
                                   u16 pcifunc, int cntr)
{
        /* Verify if counter is valid and if it is indeed
         * allocated to the requesting PFFUNC.
         */
        if (cntr >= mcam->counters.max)
                return NPC_MCAM_INVALID_REQ;

        if (pcifunc != mcam->cntr2pfvf_map[cntr])
                return NPC_MCAM_PERM_DENIED;

        return 0;
}

static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
                                        int blkaddr, u16 entry, u16 cntr)
{
        u16 index = entry & (mcam->banksize - 1);
        u16 bank = npc_get_bank(mcam, entry);

        /* Set mapping and increment counter's refcnt */
        mcam->entry2cntr_map[entry] = cntr;
        mcam->cntr_refcnt[cntr]++;
        /* Enable stats */
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
                    BIT_ULL(9) | cntr);
}

static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
                                          struct npc_mcam *mcam,
                                          int blkaddr, u16 entry, u16 cntr)
{
        u16 index = entry & (mcam->banksize - 1);
        u16 bank = npc_get_bank(mcam, entry);

        /* Remove mapping and reduce counter's refcnt */
        mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
        mcam->cntr_refcnt[cntr]--;
        /* Disable stats */
        rvu_write64(rvu, blkaddr,
                    NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
}

/* Sets MCAM entry in bitmap as used. Update
 * reverse bitmap too. Should be called with
 * 'mcam->lock' held.
 */
static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
{
        u16 entry, rentry;

        entry = index;
        rentry = mcam->bmap_entries - index - 1;

        __set_bit(entry, mcam->bmap);
        __set_bit(rentry, mcam->bmap_reverse);
        mcam->bmap_fcnt--;
}

/* Sets MCAM entry in bitmap as free. Update
 * reverse bitmap too. Should be called with
 * 'mcam->lock' held.
 */
static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
{
        u16 entry, rentry;

        entry = index;
        rentry = mcam->bmap_entries - index - 1;

        __clear_bit(entry, mcam->bmap);
        __clear_bit(rentry, mcam->bmap_reverse);
        mcam->bmap_fcnt++;
}

static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
                                      int blkaddr, u16 pcifunc)
{
        u16 index, cntr;

        /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
        for (index = 0; index < mcam->bmap_entries; index++) {
                if (mcam->entry2pfvf_map[index] == pcifunc) {
                        mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
                        /* Free the entry in bitmap */
                        npc_mcam_clear_bit(mcam, index);
                        /* Disable the entry */
                        npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);

                        /* Update entry2counter mapping */
                        cntr = mcam->entry2cntr_map[index];
                        if (cntr != NPC_MCAM_INVALID_MAP)
                                npc_unmap_mcam_entry_and_cntr(rvu, mcam,
                                                              blkaddr, index,
                                                              cntr);
                }
        }
}

static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
                                       u16 pcifunc)
{
        u16 cntr;

        /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
        for (cntr = 0; cntr < mcam->counters.max; cntr++) {
                if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
                        mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
                        mcam->cntr_refcnt[cntr] = 0;
                        rvu_free_rsrc(&mcam->counters, cntr);
                        /* This API is expected to be called after freeing
                         * MCAM entries, which in turn removes the
                         * 'entry to counter' mapping.
                         * No need to do it again.
                         */
                }
        }
}
/* Find area of contiguous free entries of size 'nr'.
 * If not found, return the max contiguous free entries available.
 */
static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
                                   u16 nr, u16 *max_area)
{
        u16 max_area_start = 0;
        u16 index, next, end;

        *max_area = 0;

again:
        index = find_next_zero_bit(map, size, start);
        if (index >= size)
                return max_area_start;

        end = ((index + nr) >= size) ? size : index + nr;
        next = find_next_bit(map, end, index);
        if (*max_area < (next - index)) {
                *max_area = next - index;
                max_area_start = index;
        }

        if (next < end) {
                start = next + 1;
                goto again;
        }

        return max_area_start;
}

/* Find number of free MCAM entries available
 * within a range, i.e. in between 'start' and 'end'.
 */
static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
{
        u16 index, next;
        u16 fcnt = 0;

again:
        if (start >= end)
                return fcnt;

        index = find_next_zero_bit(map, end, start);
        if (index >= end)
                return fcnt;

        next = find_next_bit(map, end, index);
        if (next <= end) {
                fcnt += next - index;
                start = next + 1;
                goto again;
        }

        fcnt += end - index;
        return fcnt;
}
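/* Rough bitmap layout for reference (an illustration of the fields set up
 * in npc_mcam_rsrcs_init(), not an additional data structure):
 *
 *   index 0 ........ hprio_end ........ lprio_start ........ bmap_entries - 1
 *   |<- high prio ->|<----- middle ---->|<------- low prio ------->|
 *
 * Allocations try to stay out of the hprio and lprio zones so that later
 * NPC_MCAM_HIGHER_PRIO/NPC_MCAM_LOWER_PRIO requests can still be honoured.
 */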
static void
npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
                                   struct npc_mcam_alloc_entry_req *req,
                                   u16 *start, u16 *end, bool *reverse)
{
        u16 fcnt;

        if (req->priority == NPC_MCAM_HIGHER_PRIO)
                goto hprio;

        /* For a low priority entry allocation
         * - If the reference entry is not in the hprio zone then
         *      search range: ref_entry to end.
         * - If the reference entry is in the hprio zone and the
         *   request can be accommodated in the non-hprio zone then
         *      search range: 'start of middle zone' to 'end'
         * - else search in reverse, so that fewer hprio zone
         *   entries are allocated.
         */
        *reverse = false;
        *start = req->ref_entry + 1;
        *end = mcam->bmap_entries;

        if (req->ref_entry >= mcam->hprio_end)
                return;

        fcnt = npc_mcam_get_free_count(mcam->bmap,
                                       mcam->hprio_end, mcam->bmap_entries);
        if (fcnt > req->count)
                *start = mcam->hprio_end;
        else
                *reverse = true;
        return;

hprio:
        /* For a high priority entry allocation, search is always
         * in reverse to preserve hprio zone entries.
         * - If the reference entry is not in the lprio zone then
         *      search range: 0 to ref_entry.
         * - If the reference entry is in the lprio zone and the
         *   request can be accommodated in the middle zone then
         *      search range: 'hprio_end' to 'lprio_start'
         */
        *reverse = true;
        *start = 0;
        *end = req->ref_entry;

        if (req->ref_entry <= mcam->lprio_start)
                return;

        fcnt = npc_mcam_get_free_count(mcam->bmap,
                                       mcam->hprio_end, mcam->lprio_start);
        if (fcnt < req->count)
                return;
        *start = mcam->hprio_end;
        *end = mcam->lprio_start;
}

static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
                                  struct npc_mcam_alloc_entry_req *req,
                                  struct npc_mcam_alloc_entry_rsp *rsp)
{
        u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
        u16 fcnt, hp_fcnt, lp_fcnt;
        u16 start, end, index;
        int entry, next_start;
        bool reverse = false;
        unsigned long *bmap;
        u16 max_contig;

        mutex_lock(&mcam->lock);

        /* Check if there are any free entries */
        if (!mcam->bmap_fcnt) {
                mutex_unlock(&mcam->lock);
                return NPC_MCAM_ALLOC_FAILED;
        }

        /* MCAM entries are divided into high priority, middle and
         * low priority zones. The idea is to avoid allocating the top-most
         * and bottom-most entries as far as possible, to increase the
         * probability of honouring priority allocation requests.
         *
         * Two bitmaps are used for mcam entry management,
         * mcam->bmap for forward search, i.e. '0 to mcam->bmap_entries'.
         * mcam->bmap_reverse for reverse search, i.e. 'mcam->bmap_entries to 0'.
         *
         * The reverse bitmap is used to allocate entries
         * - when a higher priority entry is requested
         * - when the available free entries are few.
         * Lower priority ones out of the available free entries are always
         * chosen when a 'high vs low' question arises.
         */

        /* Get the search range for a priority allocation request */
        if (req->priority) {
                npc_get_mcam_search_range_priority(mcam, req,
                                                   &start, &end, &reverse);
                goto alloc;
        }

        /* Find out the search range for a non-priority allocation request
         *
         * Get MCAM free entry count in middle zone.
         */
        lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
                                          mcam->lprio_start,
                                          mcam->bmap_entries);
        hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
        fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;

        /* Check if the request can be accommodated in the middle zone */
        if (fcnt > req->count) {
                start = mcam->hprio_end;
                end = mcam->lprio_start;
        } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
                /* Expand search zone from half of hprio zone to
                 * half of lprio zone.
                 */
                start = mcam->hprio_end / 2;
                end = mcam->bmap_entries - (mcam->lprio_count / 2);
                reverse = true;
        } else {
                /* Not enough free entries, search all entries in reverse,
                 * so that low priority ones will get used up.
                 */
                reverse = true;
                start = 0;
                end = mcam->bmap_entries;
        }
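        /* Illustrative note (mirrors npc_mcam_set_bit()): a forward index i
         * corresponds to reverse index 'bmap_entries - i - 1', e.g. with
         * 1024 usable entries, forward index 10 is tracked as bit 1013 in
         * mcam->bmap_reverse, which is why indices found on the reverse
         * bitmap are translated back before being reported.
         */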
alloc:
        if (reverse) {
                bmap = mcam->bmap_reverse;
                start = mcam->bmap_entries - start;
                end = mcam->bmap_entries - end;
                index = start;
                start = end;
                end = index;
        } else {
                bmap = mcam->bmap;
        }

        if (req->contig) {
                /* Allocate requested number of contiguous entries; if
                 * unsuccessful, find the max contiguous entries available.
                 */
                index = npc_mcam_find_zero_area(bmap, end, start,
                                                req->count, &max_contig);
                rsp->count = max_contig;
                if (reverse)
                        rsp->entry = mcam->bmap_entries - index - max_contig;
                else
                        rsp->entry = index;
        } else {
                /* Allocate requested number of non-contiguous entries;
                 * if unsuccessful, allocate as many as possible.
                 */
                rsp->count = 0;
                next_start = start;
                for (entry = 0; entry < req->count; entry++) {
                        index = find_next_zero_bit(bmap, end, next_start);
                        if (index >= end)
                                break;

                        next_start = start + (index - start) + 1;

                        /* Save the entry's index */
                        if (reverse)
                                index = mcam->bmap_entries - index - 1;
                        entry_list[entry] = index;
                        rsp->count++;
                }
        }

        /* If allocating the requested number of entries is unsuccessful,
         * expand the search range to the full bitmap length and retry.
         */
        if (!req->priority && (rsp->count < req->count) &&
            ((end - start) != mcam->bmap_entries)) {
                reverse = true;
                start = 0;
                end = mcam->bmap_entries;
                goto alloc;
        }

        /* For priority entry allocation requests, if the allocation
         * failed then expand the search to the max possible range and retry.
         */
        if (req->priority && rsp->count < req->count) {
                if (req->priority == NPC_MCAM_LOWER_PRIO &&
                    (start != (req->ref_entry + 1))) {
                        start = req->ref_entry + 1;
                        end = mcam->bmap_entries;
                        reverse = false;
                        goto alloc;
                } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
                           ((end - start) != req->ref_entry)) {
                        start = 0;
                        end = req->ref_entry;
                        reverse = true;
                        goto alloc;
                }
        }

        /* Copy MCAM entry indices into the mbox response entry_list.
         * The requester always expects indices in ascending order, so
         * reverse the list if the reverse bitmap was used for allocation.
         */
        if (!req->contig && rsp->count) {
                index = 0;
                for (entry = rsp->count - 1; entry >= 0; entry--) {
                        if (reverse)
                                rsp->entry_list[index++] = entry_list[entry];
                        else
                                rsp->entry_list[entry] = entry_list[entry];
                }
        }

        /* Mark the allocated entries as used and set nixlf mapping */
        for (entry = 0; entry < rsp->count; entry++) {
                index = req->contig ?
                        (rsp->entry + entry) : rsp->entry_list[entry];
                npc_mcam_set_bit(mcam, index);
                mcam->entry2pfvf_map[index] = pcifunc;
                mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
        }

        /* Update available free count in mbox response */
        rsp->free_count = mcam->bmap_fcnt;

        mutex_unlock(&mcam->lock);
        return 0;
}

int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
                                          struct npc_mcam_alloc_entry_req *req,
                                          struct npc_mcam_alloc_entry_rsp *rsp)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u16 pcifunc = req->hdr.pcifunc;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return NPC_MCAM_INVALID_REQ;

        rsp->entry = NPC_MCAM_ENTRY_INVALID;
        rsp->free_count = 0;

        /* Check if ref_entry is within range */
        if (req->priority && req->ref_entry >= mcam->bmap_entries)
                return NPC_MCAM_INVALID_REQ;
        /* ref_entry can't be '0' if the requested priority is high.
         * It can't be the last entry if the requested priority is low.
         */
        if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
            ((req->ref_entry == (mcam->bmap_entries - 1)) &&
             req->priority == NPC_MCAM_LOWER_PRIO))
                return NPC_MCAM_INVALID_REQ;

        /* Since the list of allocated indices needs to be sent to the
         * requester, the max number of non-contiguous entries per mbox
         * msg is limited.
         */
        if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
                return NPC_MCAM_INVALID_REQ;

        /* Alloc request from PFFUNC with no NIXLF attached should be denied */
        if (!is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_ALLOC_DENIED;

        return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
}

int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
                                         struct npc_mcam_free_entry_req *req,
                                         struct msg_rsp *rsp)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u16 pcifunc = req->hdr.pcifunc;
        int blkaddr, rc = 0;
        u16 cntr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return NPC_MCAM_INVALID_REQ;

        /* Free request from PFFUNC with no NIXLF attached, ignore */
        if (!is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_INVALID_REQ;

        mutex_lock(&mcam->lock);

        if (req->all)
                goto free_all;

        rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
        if (rc)
                goto exit;

        mcam->entry2pfvf_map[req->entry] = 0;
        npc_mcam_clear_bit(mcam, req->entry);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);

        /* Update entry2counter mapping */
        cntr = mcam->entry2cntr_map[req->entry];
        if (cntr != NPC_MCAM_INVALID_MAP)
                npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
                                              req->entry, cntr);

        goto exit;

free_all:
        /* Free up all entries allocated to requesting PFFUNC */
        npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
exit:
        mutex_unlock(&mcam->lock);
        return rc;
}

int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
                                          struct npc_mcam_write_entry_req *req,
                                          struct msg_rsp *rsp)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u16 pcifunc = req->hdr.pcifunc;
        int blkaddr, rc;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return NPC_MCAM_INVALID_REQ;

        mutex_lock(&mcam->lock);
        rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
        if (rc)
                goto exit;

        if (req->set_cntr &&
            npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }

        if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }

        npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
                              &req->entry_data, req->enable_entry);

        if (req->set_cntr)
                npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
                                            req->entry, req->cntr);

        rc = 0;
exit:
        mutex_unlock(&mcam->lock);
        return rc;
}
int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	mutex_unlock(&mcam->lock);
	if (rc)
		return rc;

	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);

	return 0;
}

int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
	mutex_unlock(&mcam->lock);
	if (rc)
		return rc;

	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);

	return 0;
}

int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
					  struct npc_mcam_shift_entry_req *req,
					  struct npc_mcam_shift_entry_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	u16 old_entry, new_entry;
	u16 index, cntr;
	int blkaddr, rc = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	for (index = 0; index < req->shift_count; index++) {
		old_entry = req->curr_entry[index];
		new_entry = req->new_entry[index];

		/* Check that both the old and new entries are valid and
		 * belong to this PFFUNC.
		 */
		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
		if (rc)
			break;

		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
		if (rc)
			break;

		/* new_entry should not have a counter mapped */
		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
			rc = NPC_MCAM_PERM_DENIED;
			break;
		}

		/* Disable the new_entry */
		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);

		/* Copy rule from old entry to new entry */
		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);

		/* Copy counter mapping, if any */
		cntr = mcam->entry2cntr_map[old_entry];
		if (cntr != NPC_MCAM_INVALID_MAP) {
			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
						      old_entry, cntr);
			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
						    new_entry, cntr);
		}

		/* Enable new_entry and disable old_entry */
		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
	}

	/* If the shift stopped early, report the failing index */
	if (index != req->shift_count) {
		rc = NPC_MCAM_PERM_DENIED;
		rsp->failed_entry_idx = index;
	}

	mutex_unlock(&mcam->lock);
	return rc;
}
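
/* Counter allocation below mirrors entry allocation: a contiguous request
 * is satisfied best-effort via npc_mcam_find_zero_area() (rsp->count may
 * come back smaller than req->count), while a non-contiguous request
 * allocates IDs one at a time and returns them in rsp->cntr_list[]. Both
 * paths record ownership in cntr2pfvf_map under mcam->lock.
 */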
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
			struct npc_mcam_alloc_counter_req *req,
			struct npc_mcam_alloc_counter_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 pcifunc = req->hdr.pcifunc;
	u16 max_contig, cntr;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
	if (!is_nixlf_attached(rvu, pcifunc))
		return NPC_MCAM_INVALID_REQ;

	/* Since list of allocated counter IDs needs to be sent to requester,
	 * max number of non-contiguous counters per mbox msg is limited.
	 */
	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);

	/* Check if unused counters are available or not */
	if (!rvu_rsrc_free_count(&mcam->counters)) {
		mutex_unlock(&mcam->lock);
		return NPC_MCAM_ALLOC_FAILED;
	}

	rsp->count = 0;

	if (req->contig) {
		/* Allocate requested number of contiguous counters; if
		 * unsuccessful, find the max contiguous run available.
		 */
		index = npc_mcam_find_zero_area(mcam->counters.bmap,
						mcam->counters.max, 0,
						req->count, &max_contig);
		rsp->count = max_contig;
		rsp->cntr = index;
		for (cntr = index; cntr < (index + max_contig); cntr++) {
			__set_bit(cntr, mcam->counters.bmap);
			mcam->cntr2pfvf_map[cntr] = pcifunc;
		}
	} else {
		/* Allocate requested number of non-contiguous counters;
		 * if unsuccessful, allocate as many as possible.
		 */
		for (cntr = 0; cntr < req->count; cntr++) {
			index = rvu_alloc_rsrc(&mcam->counters);
			if (index < 0)
				break;
			rsp->cntr_list[cntr] = index;
			rsp->count++;
			mcam->cntr2pfvf_map[index] = pcifunc;
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 index, entry = 0;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	if (err) {
		mutex_unlock(&mcam->lock);
		return err;
	}

	/* Mark counter as free/unused */
	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
	rvu_free_rsrc(&mcam->counters, req->cntr);

	/* Disable stats on all MCAM entries that are using this counter */
	while (entry < mcam->bmap_entries) {
		if (!mcam->cntr_refcnt[req->cntr])
			break;

		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
		if (index >= mcam->bmap_entries)
			break;
		entry = index + 1;
		if (mcam->entry2cntr_map[index] != req->cntr)
			continue;

		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      index, req->cntr);
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
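
/* Unmapping a counter below only detaches it from MCAM entries; unlike the
 * free handler above, the counter itself stays allocated to the PFFUNC.
 * With req->all set, every MCAM entry currently mapped to req->cntr is
 * walked and unmapped.
 */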
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 index, entry = 0;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	if (rc)
		goto exit;

	/* Unmap the MCAM entry and counter */
	if (!req->all) {
		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
		if (rc)
			goto exit;
		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      req->entry, req->cntr);
		goto exit;
	}

	/* Disable stats on all MCAM entries that are using this counter */
	while (entry < mcam->bmap_entries) {
		if (!mcam->cntr_refcnt[req->cntr])
			break;

		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
		if (index >= mcam->bmap_entries)
			break;
		entry = index + 1;
		if (mcam->entry2cntr_map[index] != req->cntr)
			continue;

		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
					      index, req->cntr);
	}
exit:
	mutex_unlock(&mcam->lock);
	return rc;
}

int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	mutex_unlock(&mcam->lock);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);

	return 0;
}

int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
			struct npc_mcam_oper_counter_req *req,
			struct npc_mcam_oper_counter_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	mutex_lock(&mcam->lock);
	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
	mutex_unlock(&mcam->lock);
	if (err)
		return err;

	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
	rsp->stat &= BIT_ULL(48) - 1;

	return 0;
}
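
/* Composite handler below: allocates one MCAM entry (and optionally one
 * counter) and programs it in a single mbox exchange by reusing the alloc
 * handlers above. If counter allocation fails, the just-allocated entry is
 * released before returning, so the request leaves no partial state behind.
 */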
int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
			struct npc_mcam_alloc_and_write_entry_req *req,
			struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req;
	struct npc_mcam_alloc_counter_rsp cntr_rsp;
	struct npc_mcam_alloc_entry_req entry_req;
	struct npc_mcam_alloc_entry_rsp entry_rsp;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u16 entry = NPC_MCAM_ENTRY_INVALID;
	u16 cntr = NPC_MCAM_ENTRY_INVALID;
	int blkaddr, rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
		return NPC_MCAM_INVALID_REQ;

	/* Try to allocate a MCAM entry */
	entry_req.hdr.pcifunc = req->hdr.pcifunc;
	entry_req.contig = true;
	entry_req.priority = req->priority;
	entry_req.ref_entry = req->ref_entry;
	entry_req.count = 1;

	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
						   &entry_req, &entry_rsp);
	if (rc)
		return rc;

	if (!entry_rsp.count)
		return NPC_MCAM_ALLOC_FAILED;

	entry = entry_rsp.entry;

	if (!req->alloc_cntr)
		goto write_entry;

	/* Now allocate counter */
	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
	if (rc) {
		/* Free allocated MCAM entry */
		mutex_lock(&mcam->lock);
		mcam->entry2pfvf_map[entry] = 0;
		npc_mcam_clear_bit(mcam, entry);
		mutex_unlock(&mcam->lock);
		return rc;
	}

	cntr = cntr_rsp.cntr;

write_entry:
	mutex_lock(&mcam->lock);
	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
			      &req->entry_data, req->enable_entry);

	if (req->alloc_cntr)
		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
	mutex_unlock(&mcam->lock);

	rsp->entry = entry;
	rsp->cntr = cntr;

	return 0;
}

#define GET_KEX_CFG(intf) \
	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))

#define GET_KEX_FLAGS(ld) \
	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))

#define GET_KEX_LD(intf, lid, lt, ld)	\
	rvu_read64(rvu, BLKADDR_NPC,	\
		   NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))

#define GET_KEX_LDFLAGS(intf, ld, fl)	\
	rvu_read64(rvu, BLKADDR_NPC,	\
		   NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))

int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
				     struct npc_get_kex_cfg_rsp *rsp)
{
	int lid, lt, ld, fl;

	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
			}
		}
	}
	for (ld = 0; ld < NPC_MAX_LD; ld++)
		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);

	for (ld = 0; ld < NPC_MAX_LD; ld++) {
		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
				GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
				GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
		}
	}
	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
	return 0;
}

int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;
	bool enable;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (!pfvf->rxvlan)
		return 0;

	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_UCAST_ENTRY);
	pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
	enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
	npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
			      NIX_INTF_RX, &pfvf->entry, enable);

	return 0;
}
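
/* Illustrative note (assumed typical flow, not stated in this file): a NIX
 * LF consumer usually drives the handlers above in this order -- allocate
 * an entry (rvu_mbox_handler_npc_mcam_alloc_entry), program it
 * (..._write_entry, optionally binding a counter obtained from
 * ..._alloc_counter via set_cntr), then enable it (..._ena_entry).
 * rvu_mbox_handler_npc_mcam_alloc_and_write_entry collapses the first two
 * steps into a single request.
 */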