// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first nix block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}

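/* Helper: convert the 5-bit DWRR MTU register encoding into bytes.
 * Encodings are powers of two, except for the two reserved values
 * handled explicitly below (9728 and 10240 byte MTUs).
 */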
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}

	return 0;
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}

	return 0;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *                      RX              TX
		 * NIX0 lbk link        1 (LBK2)        1 (LBK1)
		 * NIX0 lbk link        0 (LBK0)        0 (LBK0)
		 * NIX1 lbk link        0 (LBK1)        0 (LBK2)
		 * NIX1 lbk link        1 (LBK3)        1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

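/* Undo nix_interface_init(): remove this PF_FUNC from the broadcast
 * replication list and disable its MCAM and DMAC filter entries.
 */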
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

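/* Map a request's channel to its backpressure ID (BPID) for the given
 * interface type; the BPID ranges reserved for CGX, LBK and SDP links
 * are described in the comment below.
 */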
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

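/* Program the outer-L3 LSO format fields (IP length and, for IPv4,
 * the IP ID) for the given LSO format index.
 */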
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

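/* Allocate the RSS indirection table contexts for a NIX LF and program
 * the table base, size and per-group offsets (rss_grps groups of
 * rss_sz entries each).
 */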
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}

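/* Validate an AQ request against the LF's queue contexts, submit it to
 * the block's admin queue, update the RQ/SQ/CQ enable bitmaps and copy
 * any read result back into the mailbox response.
 */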
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err = 0;
	aq_req.cq_mask.wrptr = 0;
	aq_req.cq_mask.tail = 0;
	aq_req.cq_mask.head = 0;
	aq_req.cq_mask.avg_level = 0;
	aq_req.cq_mask.update_time = 0;
	aq_req.cq_mask.substream = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *	       cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

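/* Enqueue an AQ instruction on the LF's NIX block. For CQ context
 * writes, re-read the context afterwards and retry the write if it was
 * discarded (see the errata note below).
 */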
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'.
	 * As a workaround, perform a CQ context read after each AQ write. If
	 * the read shows the AQ write did not take effect, perform the AQ
	 * write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

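/* Allocate a NIX LF for the requesting PF/VF: reset the LF, allocate
 * RQ/SQ/CQ/RSS and interrupt contexts, program per-LF configuration and
 * do interface (CGX/LBK/SDP) specific initialization.
 */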
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

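/* Reserve a packet marking format entry for this PF/VF and return its
 * index in the response.
 */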
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Handle shaper update specially for few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}

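/* Clear the PARENT and SCHEDULE CSRs of a scheduler queue so it starts
 * from a clean state when it is reallocated.
 */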
static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

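/* Mark the granted scheduler queues in the level's bitmap and fill the
 * contiguous and non-contiguous queue lists in the response.
 */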
req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || 1948 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) 1949 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1950 1951 /* If contiguous queues are needed, check for availability */ 1952 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1953 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1954 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1955 1956 return 0; 1957 } 1958 1959 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1960 struct nix_txsch_alloc_rsp *rsp, 1961 int lvl, int start, int end) 1962 { 1963 struct rvu_hwinfo *hw = rvu->hw; 1964 u16 pcifunc = rsp->hdr.pcifunc; 1965 int idx, schq; 1966 1967 /* For traffic aggregating levels, queue alloc is based 1968 * on transmit link to which PF_FUNC is mapped to. 1969 */ 1970 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1971 /* A single TL queue is allocated */ 1972 if (rsp->schq_contig[lvl]) { 1973 rsp->schq_contig[lvl] = 1; 1974 rsp->schq_contig_list[lvl][0] = start; 1975 } 1976 1977 /* Both contig and non-contig reqs doesn't make sense here */ 1978 if (rsp->schq_contig[lvl]) 1979 rsp->schq[lvl] = 0; 1980 1981 if (rsp->schq[lvl]) { 1982 rsp->schq[lvl] = 1; 1983 rsp->schq_list[lvl][0] = start; 1984 } 1985 return; 1986 } 1987 1988 /* Adjust the queue request count if HW supports 1989 * only one queue per level configuration. 1990 */ 1991 if (hw->cap.nix_fixed_txschq_mapping) { 1992 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1993 schq = start + idx; 1994 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1995 rsp->schq_contig[lvl] = 0; 1996 rsp->schq[lvl] = 0; 1997 return; 1998 } 1999 2000 if (rsp->schq_contig[lvl]) { 2001 rsp->schq_contig[lvl] = 1; 2002 set_bit(schq, txsch->schq.bmap); 2003 rsp->schq_contig_list[lvl][0] = schq; 2004 rsp->schq[lvl] = 0; 2005 } else if (rsp->schq[lvl]) { 2006 rsp->schq[lvl] = 1; 2007 set_bit(schq, txsch->schq.bmap); 2008 rsp->schq_list[lvl][0] = schq; 2009 } 2010 return; 2011 } 2012 2013 /* Allocate contiguous queue indices requesty first */ 2014 if (rsp->schq_contig[lvl]) { 2015 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 2016 txsch->schq.max, start, 2017 rsp->schq_contig[lvl], 0); 2018 if (schq >= end) 2019 rsp->schq_contig[lvl] = 0; 2020 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 2021 set_bit(schq, txsch->schq.bmap); 2022 rsp->schq_contig_list[lvl][idx] = schq; 2023 schq++; 2024 } 2025 } 2026 2027 /* Allocate non-contiguous queue indices */ 2028 if (rsp->schq[lvl]) { 2029 idx = 0; 2030 for (schq = start; schq < end; schq++) { 2031 if (!test_bit(schq, txsch->schq.bmap)) { 2032 set_bit(schq, txsch->schq.bmap); 2033 rsp->schq_list[lvl][idx++] = schq; 2034 } 2035 if (idx == rsp->schq[lvl]) 2036 break; 2037 } 2038 /* Update how many were allocated */ 2039 rsp->schq[lvl] = idx; 2040 } 2041 } 2042 2043 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 2044 struct nix_txsch_alloc_req *req, 2045 struct nix_txsch_alloc_rsp *rsp) 2046 { 2047 struct rvu_hwinfo *hw = rvu->hw; 2048 u16 pcifunc = req->hdr.pcifunc; 2049 int link, blkaddr, rc = 0; 2050 int lvl, idx, start, end; 2051 struct nix_txsch *txsch; 2052 struct nix_hw *nix_hw; 2053 u32 *pfvf_map; 2054 int nixlf; 2055 u16 schq; 2056 2057 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2058 if (rc) 2059 return rc; 2060 2061 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2062 if (!nix_hw) 2063 return NIX_AF_ERR_INVALID_NIXBLK; 2064 2065 mutex_lock(&rvu->rsrc_lock); 2066 2067 /* Check if request is valid as per HW capabilities 2068 * and can be accomodated. 
2069 */ 2070 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2071 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2072 if (rc) 2073 goto err; 2074 } 2075 2076 /* Allocate requested Tx scheduler queues */ 2077 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2078 txsch = &nix_hw->txsch[lvl]; 2079 pfvf_map = txsch->pfvf_map; 2080 2081 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2082 continue; 2083 2084 rsp->schq[lvl] = req->schq[lvl]; 2085 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2086 2087 link = nix_get_tx_link(rvu, pcifunc); 2088 2089 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2090 start = link; 2091 end = link; 2092 } else if (hw->cap.nix_fixed_txschq_mapping) { 2093 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2094 } else { 2095 start = 0; 2096 end = txsch->schq.max; 2097 } 2098 2099 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2100 2101 /* Reset queue config */ 2102 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2103 schq = rsp->schq_contig_list[lvl][idx]; 2104 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2105 NIX_TXSCHQ_CFG_DONE)) 2106 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2107 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2108 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2109 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2110 } 2111 2112 for (idx = 0; idx < req->schq[lvl]; idx++) { 2113 schq = rsp->schq_list[lvl][idx]; 2114 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2115 NIX_TXSCHQ_CFG_DONE)) 2116 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2117 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2118 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2119 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2120 } 2121 } 2122 2123 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2124 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2125 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2126 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
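/* Allocation behaviour of nix_txsch_alloc() above, shown with a made-up
 * request (queue numbers are hypothetical):
 *
 *	req->schq_contig[NIX_TXSCH_LVL_TL4] = 2;
 *	req->schq[NIX_TXSCH_LVL_TL4]        = 1;
 *
 * In the flexible (non fixed-mapping) case the contiguous pair is placed
 * with bitmap_find_next_zero_area() and the extra queue by scanning for
 * any clear bit, so with bits 0..3 already taken the response could be
 *
 *	rsp->schq_contig_list[TL4] = { 4, 5 };
 *	rsp->schq_list[TL4]        = { 6 };
 *
 * For levels at or above hw->cap.nix_tx_aggr_lvl a single queue, equal to
 * the transmit link index, is returned instead, and rsp->schq[lvl] is
 * always trimmed to what was actually allocated.
 */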
2127 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2128 goto exit; 2129 err: 2130 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2131 exit: 2132 mutex_unlock(&rvu->rsrc_lock); 2133 return rc; 2134 } 2135 2136 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2137 struct nix_smq_flush_ctx *smq_flush_ctx) 2138 { 2139 struct nix_smq_tree_ctx *smq_tree_ctx; 2140 u64 parent_off, regval; 2141 u16 schq; 2142 int lvl; 2143 2144 smq_flush_ctx->smq = smq; 2145 2146 schq = smq; 2147 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2148 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2149 if (lvl == NIX_TXSCH_LVL_TL1) { 2150 smq_flush_ctx->tl1_schq = schq; 2151 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2152 smq_tree_ctx->pir_off = 0; 2153 smq_tree_ctx->pir_val = 0; 2154 parent_off = 0; 2155 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2156 smq_flush_ctx->tl2_schq = schq; 2157 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2158 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2159 parent_off = NIX_AF_TL2X_PARENT(schq); 2160 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2161 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2162 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2163 parent_off = NIX_AF_TL3X_PARENT(schq); 2164 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2165 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2166 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2167 parent_off = NIX_AF_TL4X_PARENT(schq); 2168 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2169 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2170 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2171 parent_off = NIX_AF_MDQX_PARENT(schq); 2172 } 2173 /* save cir/pir register values */ 2174 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2175 if (smq_tree_ctx->pir_off) 2176 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2177 2178 /* get parent txsch node */ 2179 if (parent_off) { 2180 regval = rvu_read64(rvu, blkaddr, parent_off); 2181 schq = (regval >> 16) & 0x1FF; 2182 } 2183 } 2184 } 2185 2186 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2187 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2188 { 2189 struct nix_txsch *txsch; 2190 struct nix_hw *nix_hw; 2191 u64 regoff; 2192 int tl2; 2193 2194 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2195 if (!nix_hw) 2196 return; 2197 2198 /* loop through all TL2s with matching PF_FUNC */ 2199 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2200 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2201 /* skip the smq(flush) TL2 */ 2202 if (tl2 == smq_flush_ctx->tl2_schq) 2203 continue; 2204 /* skip unused TL2s */ 2205 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2206 continue; 2207 /* skip if PF_FUNC doesn't match */ 2208 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2209 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & 2210 ~RVU_PFVF_FUNC_MASK))) 2211 continue; 2212 /* enable/disable XOFF */ 2213 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2214 if (enable) 2215 rvu_write64(rvu, blkaddr, regoff, 0x1); 2216 else 2217 rvu_write64(rvu, blkaddr, regoff, 0x0); 2218 } 2219 } 2220 2221 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2222 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2223 { 2224 u64 cir_off, pir_off, cir_val, pir_val; 2225 struct nix_smq_tree_ctx *smq_tree_ctx; 2226 int lvl; 2227 2228 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2229 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2230 cir_off = smq_tree_ctx->cir_off; 2231 cir_val = 
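/* The flush context filled in above is the path from the SMQ up to TL1,
 * discovered through the per-level PARENT CSRs. Sketch of the walk:
 *
 *	schq = smq;
 *	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
 *		save this level's CIR (and PIR, except at TL1) for schq;
 *		if (lvl != NIX_TXSCH_LVL_TL1)
 *			schq = (read of the PARENT reg >> 16) & 0x1FF;
 *	}
 *
 * The saved CIR/PIR values let the rate enable/disable helper zero the
 * shapers on the whole path for the duration of the flush and restore
 * them once the flush completes.
 */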
smq_tree_ctx->cir_val; 2232 pir_off = smq_tree_ctx->pir_off; 2233 pir_val = smq_tree_ctx->pir_val; 2234 2235 if (enable) { 2236 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2237 if (lvl != NIX_TXSCH_LVL_TL1) 2238 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2239 } else { 2240 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2241 if (lvl != NIX_TXSCH_LVL_TL1) 2242 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2243 } 2244 } 2245 } 2246 2247 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2248 int smq, u16 pcifunc, int nixlf) 2249 { 2250 struct nix_smq_flush_ctx *smq_flush_ctx; 2251 int pf = rvu_get_pf(pcifunc); 2252 u8 cgx_id = 0, lmac_id = 0; 2253 int err, restore_tx_en = 0; 2254 u64 cfg; 2255 2256 if (!is_rvu_otx2(rvu)) { 2257 /* Skip SMQ flush if pkt count is zero */ 2258 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2259 if (!cfg) 2260 return 0; 2261 } 2262 2263 /* enable cgx tx if disabled */ 2264 if (is_pf_cgxmapped(rvu, pf)) { 2265 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2266 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2267 lmac_id, true); 2268 } 2269 2270 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2271 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2272 if (!smq_flush_ctx) 2273 return -ENOMEM; 2274 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2275 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2276 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2277 2278 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2279 /* Do SMQ flush and set enqueue xoff */ 2280 cfg |= BIT_ULL(50) | BIT_ULL(49); 2281 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2282 2283 /* Disable backpressure from physical link, 2284 * otherwise SMQ flush may stall. 
2285 */ 2286 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2287 2288 /* Wait for flush to complete */ 2289 err = rvu_poll_reg(rvu, blkaddr, 2290 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2291 if (err) 2292 dev_info(rvu->dev, 2293 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2294 nixlf, smq); 2295 2296 /* clear XOFF on TL2s */ 2297 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2298 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2299 kfree(smq_flush_ctx); 2300 2301 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2302 /* restore cgx tx state */ 2303 if (restore_tx_en) 2304 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2305 return err; 2306 } 2307 2308 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2309 { 2310 int blkaddr, nixlf, lvl, schq, err; 2311 struct rvu_hwinfo *hw = rvu->hw; 2312 struct nix_txsch *txsch; 2313 struct nix_hw *nix_hw; 2314 u16 map_func; 2315 2316 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2317 if (blkaddr < 0) 2318 return NIX_AF_ERR_AF_LF_INVALID; 2319 2320 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2321 if (!nix_hw) 2322 return NIX_AF_ERR_INVALID_NIXBLK; 2323 2324 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2325 if (nixlf < 0) 2326 return NIX_AF_ERR_AF_LF_INVALID; 2327 2328 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2329 mutex_lock(&rvu->rsrc_lock); 2330 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2331 txsch = &nix_hw->txsch[lvl]; 2332 2333 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2334 continue; 2335 2336 for (schq = 0; schq < txsch->schq.max; schq++) { 2337 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2338 continue; 2339 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2340 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2341 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2342 } 2343 } 2344 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2345 nix_get_tx_link(rvu, pcifunc)); 2346 2347 /* On PF cleanup, clear cfg done flag as 2348 * PF would have changed default config. 2349 */ 2350 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2351 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2352 schq = nix_get_tx_link(rvu, pcifunc); 2353 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2354 * VF might be using this TL1 queue 2355 */ 2356 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2357 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2358 } 2359 2360 /* Flush SMQs */ 2361 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2362 for (schq = 0; schq < txsch->schq.max; schq++) { 2363 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2364 continue; 2365 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2366 } 2367 2368 /* Now free scheduler queues to free pool */ 2369 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2370 /* TLs above aggregation level are shared across all PF 2371 * and it's VFs, hence skip freeing them. 
2372 */ 2373 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2374 continue; 2375 2376 txsch = &nix_hw->txsch[lvl]; 2377 for (schq = 0; schq < txsch->schq.max; schq++) { 2378 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2379 continue; 2380 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2381 rvu_free_rsrc(&txsch->schq, schq); 2382 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2383 } 2384 } 2385 mutex_unlock(&rvu->rsrc_lock); 2386 2387 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 2388 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 2389 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 2390 if (err) 2391 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2392 2393 return 0; 2394 } 2395 2396 static int nix_txschq_free_one(struct rvu *rvu, 2397 struct nix_txsch_free_req *req) 2398 { 2399 struct rvu_hwinfo *hw = rvu->hw; 2400 u16 pcifunc = req->hdr.pcifunc; 2401 int lvl, schq, nixlf, blkaddr; 2402 struct nix_txsch *txsch; 2403 struct nix_hw *nix_hw; 2404 u32 *pfvf_map; 2405 int rc; 2406 2407 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2408 if (blkaddr < 0) 2409 return NIX_AF_ERR_AF_LF_INVALID; 2410 2411 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2412 if (!nix_hw) 2413 return NIX_AF_ERR_INVALID_NIXBLK; 2414 2415 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2416 if (nixlf < 0) 2417 return NIX_AF_ERR_AF_LF_INVALID; 2418 2419 lvl = req->schq_lvl; 2420 schq = req->schq; 2421 txsch = &nix_hw->txsch[lvl]; 2422 2423 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2424 return 0; 2425 2426 pfvf_map = txsch->pfvf_map; 2427 mutex_lock(&rvu->rsrc_lock); 2428 2429 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2430 rc = NIX_AF_ERR_TLX_INVALID; 2431 goto err; 2432 } 2433 2434 /* Clear SW_XOFF of this resource only. 2435 * For SMQ level, all path XOFF's 2436 * need to be made clear by user 2437 */ 2438 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2439 2440 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2441 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2442 2443 /* Flush if it is a SMQ. 
Onus of disabling 2444 * TL2/3 queue links before SMQ flush is on user 2445 */ 2446 if (lvl == NIX_TXSCH_LVL_SMQ && 2447 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2448 rc = NIX_AF_SMQ_FLUSH_FAILED; 2449 goto err; 2450 } 2451 2452 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2453 2454 /* Free the resource */ 2455 rvu_free_rsrc(&txsch->schq, schq); 2456 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2457 mutex_unlock(&rvu->rsrc_lock); 2458 return 0; 2459 err: 2460 mutex_unlock(&rvu->rsrc_lock); 2461 return rc; 2462 } 2463 2464 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2465 struct nix_txsch_free_req *req, 2466 struct msg_rsp *rsp) 2467 { 2468 if (req->flags & TXSCHQ_FREE_ALL) 2469 return nix_txschq_free(rvu, req->hdr.pcifunc); 2470 else 2471 return nix_txschq_free_one(rvu, req); 2472 } 2473 2474 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2475 int lvl, u64 reg, u64 regval) 2476 { 2477 u64 regbase = reg & 0xFFFF; 2478 u16 schq, parent; 2479 2480 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2481 return false; 2482 2483 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2484 /* Check if this schq belongs to this PF/VF or not */ 2485 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2486 return false; 2487 2488 parent = (regval >> 16) & 0x1FF; 2489 /* Validate MDQ's TL4 parent */ 2490 if (regbase == NIX_AF_MDQX_PARENT(0) && 2491 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2492 return false; 2493 2494 /* Validate TL4's TL3 parent */ 2495 if (regbase == NIX_AF_TL4X_PARENT(0) && 2496 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2497 return false; 2498 2499 /* Validate TL3's TL2 parent */ 2500 if (regbase == NIX_AF_TL3X_PARENT(0) && 2501 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2502 return false; 2503 2504 /* Validate TL2's TL1 parent */ 2505 if (regbase == NIX_AF_TL2X_PARENT(0) && 2506 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2507 return false; 2508 2509 return true; 2510 } 2511 2512 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2513 { 2514 u64 regbase; 2515 2516 if (hw->cap.nix_shaping) 2517 return true; 2518 2519 /* If shaping and coloring is not supported, then 2520 * *_CIR and *_PIR registers should not be configured. 
2521 */ 2522 regbase = reg & 0xFFFF; 2523 2524 switch (lvl) { 2525 case NIX_TXSCH_LVL_TL1: 2526 if (regbase == NIX_AF_TL1X_CIR(0)) 2527 return false; 2528 break; 2529 case NIX_TXSCH_LVL_TL2: 2530 if (regbase == NIX_AF_TL2X_CIR(0) || 2531 regbase == NIX_AF_TL2X_PIR(0)) 2532 return false; 2533 break; 2534 case NIX_TXSCH_LVL_TL3: 2535 if (regbase == NIX_AF_TL3X_CIR(0) || 2536 regbase == NIX_AF_TL3X_PIR(0)) 2537 return false; 2538 break; 2539 case NIX_TXSCH_LVL_TL4: 2540 if (regbase == NIX_AF_TL4X_CIR(0) || 2541 regbase == NIX_AF_TL4X_PIR(0)) 2542 return false; 2543 break; 2544 case NIX_TXSCH_LVL_MDQ: 2545 if (regbase == NIX_AF_MDQX_CIR(0) || 2546 regbase == NIX_AF_MDQX_PIR(0)) 2547 return false; 2548 break; 2549 } 2550 return true; 2551 } 2552 2553 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2554 u16 pcifunc, int blkaddr) 2555 { 2556 u32 *pfvf_map; 2557 int schq; 2558 2559 schq = nix_get_tx_link(rvu, pcifunc); 2560 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2561 /* Skip if PF has already done the config */ 2562 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2563 return; 2564 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2565 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2566 2567 /* On OcteonTx2 the config was in bytes and newer silcons 2568 * it's changed to weight. 2569 */ 2570 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2571 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2572 TXSCH_TL1_DFLT_RR_QTM); 2573 else 2574 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2575 CN10K_MAX_DWRR_WEIGHT); 2576 2577 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2578 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2579 } 2580 2581 /* Register offset - [15:0] 2582 * Scheduler Queue number - [25:16] 2583 */ 2584 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2585 2586 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2587 int blkaddr, struct nix_txschq_config *req, 2588 struct nix_txschq_config *rsp) 2589 { 2590 u16 pcifunc = req->hdr.pcifunc; 2591 int idx, schq; 2592 u64 reg; 2593 2594 for (idx = 0; idx < req->num_regs; idx++) { 2595 reg = req->reg[idx]; 2596 reg &= NIX_TX_SCHQ_MASK; 2597 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2598 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2599 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2600 return NIX_AF_INVAL_TXSCHQ_CFG; 2601 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2602 } 2603 rsp->lvl = req->lvl; 2604 rsp->num_regs = req->num_regs; 2605 return 0; 2606 } 2607 2608 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, 2609 struct nix_txsch *txsch, bool enable) 2610 { 2611 struct rvu_hwinfo *hw = rvu->hw; 2612 int lbk_link_start, lbk_links; 2613 u8 pf = rvu_get_pf(pcifunc); 2614 int schq; 2615 u64 cfg; 2616 2617 if (!is_pf_cgxmapped(rvu, pf)) 2618 return; 2619 2620 cfg = enable ? 
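/* Layout of the 'reg' words carried in a nix_txschq_config request, as
 * decoded above: bits [15:0] are the CSR offset and bits [25:16] the
 * scheduler queue index. Illustrative decode, assuming TXSCHQ_IDX_SHIFT
 * is 16 and a 10-bit queue index:
 *
 *	reg &= NIX_TX_SCHQ_MASK;       keep bits [25:0]
 *	regbase = reg & 0xFFFF;        CSR offset, e.g. a TLx SCHEDULE reg
 *	schq = (reg >> 16) & 0x3FF;    queue the access targets
 *
 * Anything above bit 25 is ignored, which is why both the read and write
 * handlers mask the request before validating it against TXSCHQ_HWREGMAP.
 */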
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2621 lbk_link_start = hw->cgx_links; 2622 2623 for (schq = 0; schq < txsch->schq.max; schq++) { 2624 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2625 continue; 2626 /* Enable all LBK links with channel 63 by default so that 2627 * packets can be sent to LBK with a NPC TX MCAM rule 2628 */ 2629 lbk_links = hw->lbk_links; 2630 while (lbk_links--) 2631 rvu_write64(rvu, blkaddr, 2632 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2633 lbk_link_start + 2634 lbk_links), cfg); 2635 } 2636 } 2637 2638 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2639 struct nix_txschq_config *req, 2640 struct nix_txschq_config *rsp) 2641 { 2642 u64 reg, val, regval, schq_regbase, val_mask; 2643 struct rvu_hwinfo *hw = rvu->hw; 2644 u16 pcifunc = req->hdr.pcifunc; 2645 struct nix_txsch *txsch; 2646 struct nix_hw *nix_hw; 2647 int blkaddr, idx, err; 2648 int nixlf, schq; 2649 u32 *pfvf_map; 2650 2651 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2652 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2653 return NIX_AF_INVAL_TXSCHQ_CFG; 2654 2655 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2656 if (err) 2657 return err; 2658 2659 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2660 if (!nix_hw) 2661 return NIX_AF_ERR_INVALID_NIXBLK; 2662 2663 if (req->read) 2664 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2665 2666 txsch = &nix_hw->txsch[req->lvl]; 2667 pfvf_map = txsch->pfvf_map; 2668 2669 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2670 pcifunc & RVU_PFVF_FUNC_MASK) { 2671 mutex_lock(&rvu->rsrc_lock); 2672 if (req->lvl == NIX_TXSCH_LVL_TL1) 2673 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2674 mutex_unlock(&rvu->rsrc_lock); 2675 return 0; 2676 } 2677 2678 for (idx = 0; idx < req->num_regs; idx++) { 2679 reg = req->reg[idx]; 2680 reg &= NIX_TX_SCHQ_MASK; 2681 regval = req->regval[idx]; 2682 schq_regbase = reg & 0xFFFF; 2683 val_mask = req->regval_mask[idx]; 2684 2685 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2686 txsch->lvl, reg, regval)) 2687 return NIX_AF_INVAL_TXSCHQ_CFG; 2688 2689 /* Check if shaping and coloring is supported */ 2690 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2691 continue; 2692 2693 val = rvu_read64(rvu, blkaddr, reg); 2694 regval = (val & val_mask) | (regval & ~val_mask); 2695 2696 /* Handle shaping state toggle specially */ 2697 if (hw->cap.nix_shaper_toggle_wait && 2698 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2699 req->lvl, reg, regval)) 2700 continue; 2701 2702 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2703 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2704 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2705 pcifunc, 0); 2706 regval &= ~(0x7FULL << 24); 2707 regval |= ((u64)nixlf << 24); 2708 } 2709 2710 /* Clear 'BP_ENA' config, if it's not allowed */ 2711 if (!hw->cap.nix_tx_link_bp) { 2712 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2713 (schq_regbase & 0xFF00) == 2714 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2715 regval &= ~BIT_ULL(13); 2716 } 2717 2718 /* Mark config as done for TL1 by PF */ 2719 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2720 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2721 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2722 mutex_lock(&rvu->rsrc_lock); 2723 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2724 NIX_TXSCHQ_CFG_DONE); 2725 mutex_unlock(&rvu->rsrc_lock); 2726 } 2727 2728 /* SMQ flush is special hence split register writes such 2729 * that flush first and write rest of the bits later. 
2730 */ 2731 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2732 (regval & BIT_ULL(49))) { 2733 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2734 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2735 regval &= ~BIT_ULL(49); 2736 } 2737 rvu_write64(rvu, blkaddr, reg, regval); 2738 } 2739 2740 return 0; 2741 } 2742 2743 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2744 struct nix_vtag_config *req) 2745 { 2746 u64 regval = req->vtag_size; 2747 2748 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2749 req->vtag_size > VTAGSIZE_T8) 2750 return -EINVAL; 2751 2752 /* RX VTAG Type 7 reserved for vf vlan */ 2753 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2754 return NIX_AF_ERR_RX_VTAG_INUSE; 2755 2756 if (req->rx.capture_vtag) 2757 regval |= BIT_ULL(5); 2758 if (req->rx.strip_vtag) 2759 regval |= BIT_ULL(4); 2760 2761 rvu_write64(rvu, blkaddr, 2762 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2763 return 0; 2764 } 2765 2766 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2767 u16 pcifunc, int index) 2768 { 2769 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2770 struct nix_txvlan *vlan; 2771 2772 if (!nix_hw) 2773 return NIX_AF_ERR_INVALID_NIXBLK; 2774 2775 vlan = &nix_hw->txvlan; 2776 if (vlan->entry2pfvf_map[index] != pcifunc) 2777 return NIX_AF_ERR_PARAM; 2778 2779 rvu_write64(rvu, blkaddr, 2780 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2781 rvu_write64(rvu, blkaddr, 2782 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2783 2784 vlan->entry2pfvf_map[index] = 0; 2785 rvu_free_rsrc(&vlan->rsrc, index); 2786 2787 return 0; 2788 } 2789 2790 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2791 { 2792 struct nix_txvlan *vlan; 2793 struct nix_hw *nix_hw; 2794 int index, blkaddr; 2795 2796 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2797 if (blkaddr < 0) 2798 return; 2799 2800 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2801 if (!nix_hw) 2802 return; 2803 2804 vlan = &nix_hw->txvlan; 2805 2806 mutex_lock(&vlan->rsrc_lock); 2807 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2808 for (index = 0; index < vlan->rsrc.max; index++) { 2809 if (vlan->entry2pfvf_map[index] == pcifunc) 2810 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2811 } 2812 mutex_unlock(&vlan->rsrc_lock); 2813 } 2814 2815 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2816 u64 vtag, u8 size) 2817 { 2818 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2819 struct nix_txvlan *vlan; 2820 u64 regval; 2821 int index; 2822 2823 if (!nix_hw) 2824 return NIX_AF_ERR_INVALID_NIXBLK; 2825 2826 vlan = &nix_hw->txvlan; 2827 2828 mutex_lock(&vlan->rsrc_lock); 2829 2830 index = rvu_alloc_rsrc(&vlan->rsrc); 2831 if (index < 0) { 2832 mutex_unlock(&vlan->rsrc_lock); 2833 return index; 2834 } 2835 2836 mutex_unlock(&vlan->rsrc_lock); 2837 2838 regval = size ? 
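/* The regval_mask handling in rvu_mbox_handler_nix_txschq_cfg() above is
 * a per-bit merge: mask bits that are set keep the current CSR value,
 * mask bits that are clear take the requested value. A made-up example:
 *
 *	current value  val      = 0x00f0
 *	requested      regval   = 0x1234
 *	mask           val_mask = 0xff00
 *
 *	written = (val & val_mask) | (regval & ~val_mask)
 *	        = 0x0000           | 0x0034
 *	        = 0x0034
 *
 * i.e. this request only updates the low byte; the upper byte is re-read
 * from hardware rather than taken from the mailbox message.
 */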
vtag : vtag << 32; 2839 2840 rvu_write64(rvu, blkaddr, 2841 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2842 rvu_write64(rvu, blkaddr, 2843 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2844 2845 return index; 2846 } 2847 2848 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2849 struct nix_vtag_config *req) 2850 { 2851 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2852 u16 pcifunc = req->hdr.pcifunc; 2853 int idx0 = req->tx.vtag0_idx; 2854 int idx1 = req->tx.vtag1_idx; 2855 struct nix_txvlan *vlan; 2856 int err = 0; 2857 2858 if (!nix_hw) 2859 return NIX_AF_ERR_INVALID_NIXBLK; 2860 2861 vlan = &nix_hw->txvlan; 2862 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2863 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2864 vlan->entry2pfvf_map[idx1] != pcifunc) 2865 return NIX_AF_ERR_PARAM; 2866 2867 mutex_lock(&vlan->rsrc_lock); 2868 2869 if (req->tx.free_vtag0) { 2870 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2871 if (err) 2872 goto exit; 2873 } 2874 2875 if (req->tx.free_vtag1) 2876 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2877 2878 exit: 2879 mutex_unlock(&vlan->rsrc_lock); 2880 return err; 2881 } 2882 2883 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2884 struct nix_vtag_config *req, 2885 struct nix_vtag_config_rsp *rsp) 2886 { 2887 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2888 struct nix_txvlan *vlan; 2889 u16 pcifunc = req->hdr.pcifunc; 2890 2891 if (!nix_hw) 2892 return NIX_AF_ERR_INVALID_NIXBLK; 2893 2894 vlan = &nix_hw->txvlan; 2895 if (req->tx.cfg_vtag0) { 2896 rsp->vtag0_idx = 2897 nix_tx_vtag_alloc(rvu, blkaddr, 2898 req->tx.vtag0, req->vtag_size); 2899 2900 if (rsp->vtag0_idx < 0) 2901 return NIX_AF_ERR_TX_VTAG_NOSPC; 2902 2903 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2904 } 2905 2906 if (req->tx.cfg_vtag1) { 2907 rsp->vtag1_idx = 2908 nix_tx_vtag_alloc(rvu, blkaddr, 2909 req->tx.vtag1, req->vtag_size); 2910 2911 if (rsp->vtag1_idx < 0) 2912 goto err_free; 2913 2914 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2915 } 2916 2917 return 0; 2918 2919 err_free: 2920 if (req->tx.cfg_vtag0) 2921 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2922 2923 return NIX_AF_ERR_TX_VTAG_NOSPC; 2924 } 2925 2926 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2927 struct nix_vtag_config *req, 2928 struct nix_vtag_config_rsp *rsp) 2929 { 2930 u16 pcifunc = req->hdr.pcifunc; 2931 int blkaddr, nixlf, err; 2932 2933 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2934 if (err) 2935 return err; 2936 2937 if (req->cfg_type) { 2938 /* rx vtag configuration */ 2939 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2940 if (err) 2941 return NIX_AF_ERR_PARAM; 2942 } else { 2943 /* tx vtag configuration */ 2944 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2945 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2946 return NIX_AF_ERR_PARAM; 2947 2948 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2949 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2950 2951 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2952 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2953 } 2954 2955 return 0; 2956 } 2957 2958 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2959 int mce, u8 op, u16 pcifunc, int next, bool eol) 2960 { 2961 struct nix_aq_enq_req aq_req; 2962 int err; 2963 2964 aq_req.hdr.pcifunc = 0; 2965 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2966 aq_req.op = op; 2967 aq_req.qidx = mce; 2968 2969 /* Use RSS with RSS index 0 */ 2970 aq_req.mce.op = 1; 2971 aq_req.mce.index = 0; 2972 aq_req.mce.eol = eol; 2973 aq_req.mce.pf_func = pcifunc; 
2974 aq_req.mce.next = next; 2975 2976 /* All fields valid */ 2977 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2978 2979 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2980 if (err) { 2981 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2982 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2983 return err; 2984 } 2985 return 0; 2986 } 2987 2988 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2989 u16 pcifunc, bool add) 2990 { 2991 struct mce *mce, *tail = NULL; 2992 bool delete = false; 2993 2994 /* Scan through the current list */ 2995 hlist_for_each_entry(mce, &mce_list->head, node) { 2996 /* If already exists, then delete */ 2997 if (mce->pcifunc == pcifunc && !add) { 2998 delete = true; 2999 break; 3000 } else if (mce->pcifunc == pcifunc && add) { 3001 /* entry already exists */ 3002 return 0; 3003 } 3004 tail = mce; 3005 } 3006 3007 if (delete) { 3008 hlist_del(&mce->node); 3009 kfree(mce); 3010 mce_list->count--; 3011 return 0; 3012 } 3013 3014 if (!add) 3015 return 0; 3016 3017 /* Add a new one to the list, at the tail */ 3018 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3019 if (!mce) 3020 return -ENOMEM; 3021 mce->pcifunc = pcifunc; 3022 if (!tail) 3023 hlist_add_head(&mce->node, &mce_list->head); 3024 else 3025 hlist_add_behind(&mce->node, &tail->node); 3026 mce_list->count++; 3027 return 0; 3028 } 3029 3030 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3031 struct nix_mce_list *mce_list, 3032 int mce_idx, int mcam_index, bool add) 3033 { 3034 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3035 struct npc_mcam *mcam = &rvu->hw->mcam; 3036 struct nix_mcast *mcast; 3037 struct nix_hw *nix_hw; 3038 struct mce *mce; 3039 3040 if (!mce_list) 3041 return -EINVAL; 3042 3043 /* Get this PF/VF func's MCE index */ 3044 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3045 3046 if (idx > (mce_idx + mce_list->max)) { 3047 dev_err(rvu->dev, 3048 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3049 __func__, idx, mce_list->max, 3050 pcifunc >> RVU_PFVF_PF_SHIFT); 3051 return -EINVAL; 3052 } 3053 3054 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3055 if (err) 3056 return err; 3057 3058 mcast = &nix_hw->mcast; 3059 mutex_lock(&mcast->mce_lock); 3060 3061 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3062 if (err) 3063 goto end; 3064 3065 /* Disable MCAM entry in NPC */ 3066 if (!mce_list->count) { 3067 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3068 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3069 goto end; 3070 } 3071 3072 /* Dump the updated list to HW */ 3073 idx = mce_idx; 3074 last_idx = idx + mce_list->count - 1; 3075 hlist_for_each_entry(mce, &mce_list->head, node) { 3076 if (idx > last_idx) 3077 break; 3078 3079 next_idx = idx + 1; 3080 /* EOL should be set in last MCE */ 3081 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3082 mce->pcifunc, next_idx, 3083 (next_idx > last_idx) ? 
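/* The replication list being written here is a linear chain of MCE
 * entries. For a hypothetical list holding PF1, PF1-VF0 and PF1-VF1 with
 * mce_idx = 8, the loop programs:
 *
 *	MCE[8]  : pf_func = PF1,     next = 9,  eol = 0
 *	MCE[9]  : pf_func = PF1-VF0, next = 10, eol = 0
 *	MCE[10] : pf_func = PF1-VF1, next = 11, eol = 1
 *
 * The final entry still carries next = last_idx + 1, but eol marks the
 * end of the chain. When the list count drops to zero the NPC MCAM entry
 * is disabled instead of leaving an empty chain in hardware.
 */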
true : false); 3084 if (err) 3085 goto end; 3086 idx++; 3087 } 3088 3089 end: 3090 mutex_unlock(&mcast->mce_lock); 3091 return err; 3092 } 3093 3094 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3095 struct nix_mce_list **mce_list, int *mce_idx) 3096 { 3097 struct rvu_hwinfo *hw = rvu->hw; 3098 struct rvu_pfvf *pfvf; 3099 3100 if (!hw->cap.nix_rx_multicast || 3101 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3102 *mce_list = NULL; 3103 *mce_idx = 0; 3104 return; 3105 } 3106 3107 /* Get this PF/VF func's MCE index */ 3108 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3109 3110 if (type == NIXLF_BCAST_ENTRY) { 3111 *mce_list = &pfvf->bcast_mce_list; 3112 *mce_idx = pfvf->bcast_mce_idx; 3113 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3114 *mce_list = &pfvf->mcast_mce_list; 3115 *mce_idx = pfvf->mcast_mce_idx; 3116 } else if (type == NIXLF_PROMISC_ENTRY) { 3117 *mce_list = &pfvf->promisc_mce_list; 3118 *mce_idx = pfvf->promisc_mce_idx; 3119 } else { 3120 *mce_list = NULL; 3121 *mce_idx = 0; 3122 } 3123 } 3124 3125 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3126 int type, bool add) 3127 { 3128 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3129 struct npc_mcam *mcam = &rvu->hw->mcam; 3130 struct rvu_hwinfo *hw = rvu->hw; 3131 struct nix_mce_list *mce_list; 3132 int pf; 3133 3134 /* skip multicast pkt replication for AF's VFs & SDP links */ 3135 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 3136 return 0; 3137 3138 if (!hw->cap.nix_rx_multicast) 3139 return 0; 3140 3141 pf = rvu_get_pf(pcifunc); 3142 if (!is_pf_cgxmapped(rvu, pf)) 3143 return 0; 3144 3145 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3146 if (blkaddr < 0) 3147 return -EINVAL; 3148 3149 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3150 if (nixlf < 0) 3151 return -EINVAL; 3152 3153 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3154 3155 mcam_index = npc_get_nixlf_mcam_index(mcam, 3156 pcifunc & ~RVU_PFVF_FUNC_MASK, 3157 nixlf, type); 3158 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3159 mce_idx, mcam_index, add); 3160 return err; 3161 } 3162 3163 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3164 { 3165 struct nix_mcast *mcast = &nix_hw->mcast; 3166 int err, pf, numvfs, idx; 3167 struct rvu_pfvf *pfvf; 3168 u16 pcifunc; 3169 u64 cfg; 3170 3171 /* Skip PF0 (i.e AF) */ 3172 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3173 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3174 /* If PF is not enabled, nothing to do */ 3175 if (!((cfg >> 20) & 0x01)) 3176 continue; 3177 /* Get numVFs attached to this PF */ 3178 numvfs = (cfg >> 12) & 0xFF; 3179 3180 pfvf = &rvu->pf[pf]; 3181 3182 /* This NIX0/1 block mapped to PF ? 
*/ 3183 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3184 continue; 3185 3186 /* save start idx of broadcast mce list */ 3187 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3188 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3189 3190 /* save start idx of multicast mce list */ 3191 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3192 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3193 3194 /* save the start idx of promisc mce list */ 3195 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3196 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3197 3198 for (idx = 0; idx < (numvfs + 1); idx++) { 3199 /* idx-0 is for PF, followed by VFs */ 3200 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3201 pcifunc |= idx; 3202 /* Add dummy entries now, so that we don't have to check 3203 * for whether AQ_OP should be INIT/WRITE later on. 3204 * Will be updated when a NIXLF is attached/detached to 3205 * these PF/VFs. 3206 */ 3207 err = nix_blk_setup_mce(rvu, nix_hw, 3208 pfvf->bcast_mce_idx + idx, 3209 NIX_AQ_INSTOP_INIT, 3210 pcifunc, 0, true); 3211 if (err) 3212 return err; 3213 3214 /* add dummy entries to multicast mce list */ 3215 err = nix_blk_setup_mce(rvu, nix_hw, 3216 pfvf->mcast_mce_idx + idx, 3217 NIX_AQ_INSTOP_INIT, 3218 pcifunc, 0, true); 3219 if (err) 3220 return err; 3221 3222 /* add dummy entries to promisc mce list */ 3223 err = nix_blk_setup_mce(rvu, nix_hw, 3224 pfvf->promisc_mce_idx + idx, 3225 NIX_AQ_INSTOP_INIT, 3226 pcifunc, 0, true); 3227 if (err) 3228 return err; 3229 } 3230 } 3231 return 0; 3232 } 3233 3234 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3235 { 3236 struct nix_mcast *mcast = &nix_hw->mcast; 3237 struct rvu_hwinfo *hw = rvu->hw; 3238 int err, size; 3239 3240 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3241 size = (1ULL << size); 3242 3243 /* Alloc memory for multicast/mirror replication entries */ 3244 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3245 (256UL << MC_TBL_SIZE), size); 3246 if (err) 3247 return -ENOMEM; 3248 3249 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3250 (u64)mcast->mce_ctx->iova); 3251 3252 /* Set max list length equal to max no of VFs per PF + PF itself */ 3253 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3254 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3255 3256 /* Alloc memory for multicast replication buffers */ 3257 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3258 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3259 (8UL << MC_BUF_CNT), size); 3260 if (err) 3261 return -ENOMEM; 3262 3263 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3264 (u64)mcast->mcast_buf->iova); 3265 3266 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3267 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3268 3269 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3270 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3271 BIT_ULL(20) | MC_BUF_CNT); 3272 3273 mutex_init(&mcast->mce_lock); 3274 3275 return nix_setup_mce_tables(rvu, nix_hw); 3276 } 3277 3278 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3279 { 3280 struct nix_txvlan *vlan = &nix_hw->txvlan; 3281 int err; 3282 3283 /* Allocate resource bimap for tx vtag def registers*/ 3284 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3285 err = rvu_alloc_bitmap(&vlan->rsrc); 3286 if (err) 3287 return -ENOMEM; 3288 3289 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3290 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, 
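/* Sizing of the multicast resources set up in nix_setup_mcast() above:
 * the MCE context holds (256UL << MC_TBL_SIZE) entries and every CGX
 * mapped PF claims three back-to-back windows of (numvfs + 1) entries,
 * one each for its broadcast, allmulti and promisc replication lists.
 * With a made-up PF owning 8 VFs:
 *
 *	bcast_mce_idx   = base        (9 entries: PF + 8 VFs)
 *	mcast_mce_idx   = base + 9
 *	promisc_mce_idx = base + 18
 *
 * and slot 0 of every window refers to the PF itself, with its VFs at
 * offsets 1..numvfs.
 */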
vlan->rsrc.max, 3291 sizeof(u16), GFP_KERNEL); 3292 if (!vlan->entry2pfvf_map) 3293 goto free_mem; 3294 3295 mutex_init(&vlan->rsrc_lock); 3296 return 0; 3297 3298 free_mem: 3299 kfree(vlan->rsrc.bmap); 3300 return -ENOMEM; 3301 } 3302 3303 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3304 { 3305 struct nix_txsch *txsch; 3306 int err, lvl, schq; 3307 u64 cfg, reg; 3308 3309 /* Get scheduler queue count of each type and alloc 3310 * bitmap for each for alloc/free/attach operations. 3311 */ 3312 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3313 txsch = &nix_hw->txsch[lvl]; 3314 txsch->lvl = lvl; 3315 switch (lvl) { 3316 case NIX_TXSCH_LVL_SMQ: 3317 reg = NIX_AF_MDQ_CONST; 3318 break; 3319 case NIX_TXSCH_LVL_TL4: 3320 reg = NIX_AF_TL4_CONST; 3321 break; 3322 case NIX_TXSCH_LVL_TL3: 3323 reg = NIX_AF_TL3_CONST; 3324 break; 3325 case NIX_TXSCH_LVL_TL2: 3326 reg = NIX_AF_TL2_CONST; 3327 break; 3328 case NIX_TXSCH_LVL_TL1: 3329 reg = NIX_AF_TL1_CONST; 3330 break; 3331 } 3332 cfg = rvu_read64(rvu, blkaddr, reg); 3333 txsch->schq.max = cfg & 0xFFFF; 3334 err = rvu_alloc_bitmap(&txsch->schq); 3335 if (err) 3336 return err; 3337 3338 /* Allocate memory for scheduler queues to 3339 * PF/VF pcifunc mapping info. 3340 */ 3341 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3342 sizeof(u32), GFP_KERNEL); 3343 if (!txsch->pfvf_map) 3344 return -ENOMEM; 3345 for (schq = 0; schq < txsch->schq.max; schq++) 3346 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3347 } 3348 3349 /* Setup a default value of 8192 as DWRR MTU */ 3350 if (rvu->hw->cap.nix_common_dwrr_mtu || 3351 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3352 rvu_write64(rvu, blkaddr, 3353 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3354 convert_bytes_to_dwrr_mtu(8192)); 3355 rvu_write64(rvu, blkaddr, 3356 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3357 convert_bytes_to_dwrr_mtu(8192)); 3358 rvu_write64(rvu, blkaddr, 3359 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3360 convert_bytes_to_dwrr_mtu(8192)); 3361 } 3362 3363 return 0; 3364 } 3365 3366 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3367 int blkaddr, u32 cfg) 3368 { 3369 int fmt_idx; 3370 3371 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3372 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3373 return fmt_idx; 3374 } 3375 if (fmt_idx >= nix_hw->mark_format.total) 3376 return -ERANGE; 3377 3378 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3379 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3380 nix_hw->mark_format.in_use++; 3381 return fmt_idx; 3382 } 3383 3384 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3385 int blkaddr) 3386 { 3387 u64 cfgs[] = { 3388 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3389 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3390 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3391 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3392 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3393 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3394 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3395 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3396 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3397 }; 3398 int i, rc; 3399 u64 total; 3400 3401 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3402 nix_hw->mark_format.total = (u8)total; 3403 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3404 GFP_KERNEL); 3405 if (!nix_hw->mark_format.cfg) 3406 return -ENOMEM; 3407 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3408 rc = 
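/* rvu_nix_reserve_mark_format() deduplicates: if 'cfg' was already
 * programmed it returns the existing index, otherwise it takes the next
 * free NIX_AF_MARK_FORMATX_CTL slot (or -ERANGE once all
 * nix_hw->mark_format.total slots are used). The surrounding loop just
 * pre-populates the table with the nine default DSCP/ECN/DEI marking
 * configs, so a later caller asking for, say, the IP-DSCP "red" config
 * gets back the same index that is reserved here.
 */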
rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3409 if (rc < 0) 3410 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3411 i, rc); 3412 } 3413 3414 return 0; 3415 } 3416 3417 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3418 { 3419 /* CN10K supports LBK FIFO size 72 KB */ 3420 if (rvu->hw->lbk_bufsize == 0x12000) 3421 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3422 else 3423 *max_mtu = NIC_HW_MAX_FRS; 3424 } 3425 3426 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3427 { 3428 int fifo_size = rvu_cgx_get_fifolen(rvu); 3429 3430 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3431 * FIFO len to accommodate 8 LMACS 3432 */ 3433 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3434 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3435 else 3436 *max_mtu = NIC_HW_MAX_FRS; 3437 } 3438 3439 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3440 struct nix_hw_info *rsp) 3441 { 3442 u16 pcifunc = req->hdr.pcifunc; 3443 u64 dwrr_mtu; 3444 int blkaddr; 3445 3446 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3447 if (blkaddr < 0) 3448 return NIX_AF_ERR_AF_LF_INVALID; 3449 3450 if (is_afvf(pcifunc)) 3451 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3452 else 3453 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3454 3455 rsp->min_mtu = NIC_HW_MIN_FRS; 3456 3457 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3458 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3459 /* Return '1' on OTx2 */ 3460 rsp->rpm_dwrr_mtu = 1; 3461 rsp->sdp_dwrr_mtu = 1; 3462 rsp->lbk_dwrr_mtu = 1; 3463 return 0; 3464 } 3465 3466 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3467 dwrr_mtu = rvu_read64(rvu, blkaddr, 3468 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3469 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3470 3471 dwrr_mtu = rvu_read64(rvu, blkaddr, 3472 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3473 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3474 3475 dwrr_mtu = rvu_read64(rvu, blkaddr, 3476 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3477 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3478 3479 return 0; 3480 } 3481 3482 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3483 struct msg_rsp *rsp) 3484 { 3485 u16 pcifunc = req->hdr.pcifunc; 3486 int i, nixlf, blkaddr, err; 3487 u64 stats; 3488 3489 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3490 if (err) 3491 return err; 3492 3493 /* Get stats count supported by HW */ 3494 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3495 3496 /* Reset tx stats */ 3497 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3499 3500 /* Reset rx stats */ 3501 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3503 3504 return 0; 3505 } 3506 3507 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3508 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3509 { 3510 int i; 3511 3512 /* Scan over exiting algo entries to find a match */ 3513 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3514 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3515 return i; 3516 3517 return -ERANGE; 3518 } 3519 3520 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3521 { 3522 int idx, nr_field, key_off, field_marker, keyoff_marker; 3523 int max_key_off, max_bit_pos, group_member; 3524 struct nix_rx_flowkey_alg *field; 3525 struct nix_rx_flowkey_alg tmp; 3526 u32 key_type, 
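/* The stat counts used by rvu_mbox_handler_nix_stats_rst() above come out
 * of NIX_AF_CONST1; judging by the shifts used there the layout is
 * assumed to be:
 *
 *	bits [31:24] : number of per-LF TX statistics
 *	bits [39:32] : number of per-LF RX statistics
 *
 * so the reset loops simply zero NIX_AF_LFX_TX_STATX() and
 * NIX_AF_LFX_RX_STATX() for indices 0 .. count - 1 of each kind.
 */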
valid_key; 3527 u32 l3_l4_src_dst; 3528 int l4_key_offset = 0; 3529 3530 if (!alg) 3531 return -EINVAL; 3532 3533 #define FIELDS_PER_ALG 5 3534 #define MAX_KEY_OFF 40 3535 /* Clear all fields */ 3536 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3537 3538 /* Each of the 32 possible flow key algorithm definitions should 3539 * fall into above incremental config (except ALG0). Otherwise a 3540 * single NPC MCAM entry is not sufficient for supporting RSS. 3541 * 3542 * If a different definition or combination needed then NPC MCAM 3543 * has to be programmed to filter such pkts and it's action should 3544 * point to this definition to calculate flowtag or hash. 3545 * 3546 * The `for loop` goes over _all_ protocol field and the following 3547 * variables depicts the state machine forward progress logic. 3548 * 3549 * keyoff_marker - Enabled when hash byte length needs to be accounted 3550 * in field->key_offset update. 3551 * field_marker - Enabled when a new field needs to be selected. 3552 * group_member - Enabled when protocol is part of a group. 3553 */ 3554 3555 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3556 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3557 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3558 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3559 */ 3560 l3_l4_src_dst = flow_cfg; 3561 /* Reset these 4 bits, so that these won't be part of key */ 3562 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3563 3564 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3565 nr_field = 0; key_off = 0; field_marker = 1; 3566 field = &tmp; max_bit_pos = fls(flow_cfg); 3567 for (idx = 0; 3568 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3569 key_off < MAX_KEY_OFF; idx++) { 3570 key_type = BIT(idx); 3571 valid_key = flow_cfg & key_type; 3572 /* Found a field marker, reset the field values */ 3573 if (field_marker) 3574 memset(&tmp, 0, sizeof(tmp)); 3575 3576 field_marker = true; 3577 keyoff_marker = true; 3578 switch (key_type) { 3579 case NIX_FLOW_KEY_TYPE_PORT: 3580 field->sel_chan = true; 3581 /* This should be set to 1, when SEL_CHAN is set */ 3582 field->bytesm1 = 1; 3583 break; 3584 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3585 field->lid = NPC_LID_LC; 3586 field->hdr_offset = 9; /* offset */ 3587 field->bytesm1 = 0; /* 1 byte */ 3588 field->ltype_match = NPC_LT_LC_IP; 3589 field->ltype_mask = 0xF; 3590 break; 3591 case NIX_FLOW_KEY_TYPE_IPV4: 3592 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3593 field->lid = NPC_LID_LC; 3594 field->ltype_match = NPC_LT_LC_IP; 3595 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3596 field->lid = NPC_LID_LG; 3597 field->ltype_match = NPC_LT_LG_TU_IP; 3598 } 3599 field->hdr_offset = 12; /* SIP offset */ 3600 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3601 3602 /* Only SIP */ 3603 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3604 field->bytesm1 = 3; /* SIP, 4 bytes */ 3605 3606 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3607 /* Both SIP + DIP */ 3608 if (field->bytesm1 == 3) { 3609 field->bytesm1 = 7; /* SIP + DIP, 8B */ 3610 } else { 3611 /* Only DIP */ 3612 field->hdr_offset = 16; /* DIP off */ 3613 field->bytesm1 = 3; /* DIP, 4 bytes */ 3614 } 3615 } 3616 3617 field->ltype_mask = 0xF; /* Match only IPv4 */ 3618 keyoff_marker = false; 3619 break; 3620 case NIX_FLOW_KEY_TYPE_IPV6: 3621 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3622 field->lid = NPC_LID_LC; 3623 field->ltype_match = NPC_LT_LC_IP6; 3624 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3625 field->lid = NPC_LID_LG; 3626 field->ltype_match = NPC_LT_LG_TU_IP6; 3627 } 
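/* Effect of the L3 SRC/DST selection bits, handled for IPv4 above and for
 * IPv6 just below, using IPv4 as a worked example (NIX_FLOW_KEY_TYPE_
 * prefixes dropped for brevity):
 *
 *	IPV4 only                        : SIP + DIP, 8 bytes @ offset 12
 *	IPV4 | L3_SRC_ONLY               : SIP only,  4 bytes @ offset 12
 *	IPV4 | L3_DST_ONLY               : DIP only,  4 bytes @ offset 16
 *	IPV4 | L3_SRC_ONLY | L3_DST_ONLY : SIP + DIP, 8 bytes @ offset 12
 *
 * The selector bits live in flow_cfg[31:28] and are masked out of the key
 * itself before the per-protocol switch runs.
 */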
3628 field->hdr_offset = 8; /* SIP offset */ 3629 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3630 3631 /* Only SIP */ 3632 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3633 field->bytesm1 = 15; /* SIP, 16 bytes */ 3634 3635 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3636 /* Both SIP + DIP */ 3637 if (field->bytesm1 == 15) { 3638 /* SIP + DIP, 32 bytes */ 3639 field->bytesm1 = 31; 3640 } else { 3641 /* Only DIP */ 3642 field->hdr_offset = 24; /* DIP off */ 3643 field->bytesm1 = 15; /* DIP,16 bytes */ 3644 } 3645 } 3646 field->ltype_mask = 0xF; /* Match only IPv6 */ 3647 break; 3648 case NIX_FLOW_KEY_TYPE_TCP: 3649 case NIX_FLOW_KEY_TYPE_UDP: 3650 case NIX_FLOW_KEY_TYPE_SCTP: 3651 case NIX_FLOW_KEY_TYPE_INNR_TCP: 3652 case NIX_FLOW_KEY_TYPE_INNR_UDP: 3653 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 3654 field->lid = NPC_LID_LD; 3655 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 3656 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 3657 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 3658 field->lid = NPC_LID_LH; 3659 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 3660 3661 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 3662 field->bytesm1 = 1; /* SRC, 2 bytes */ 3663 3664 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 3665 /* Both SRC + DST */ 3666 if (field->bytesm1 == 1) { 3667 /* SRC + DST, 4 bytes */ 3668 field->bytesm1 = 3; 3669 } else { 3670 /* Only DIP */ 3671 field->hdr_offset = 2; /* DST off */ 3672 field->bytesm1 = 1; /* DST, 2 bytes */ 3673 } 3674 } 3675 3676 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 3677 * so no need to change the ltype_match, just change 3678 * the lid for inner protocols 3679 */ 3680 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 3681 (int)NPC_LT_LH_TU_TCP); 3682 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 3683 (int)NPC_LT_LH_TU_UDP); 3684 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 3685 (int)NPC_LT_LH_TU_SCTP); 3686 3687 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 3688 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 3689 valid_key) { 3690 field->ltype_match |= NPC_LT_LD_TCP; 3691 group_member = true; 3692 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 3693 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 3694 valid_key) { 3695 field->ltype_match |= NPC_LT_LD_UDP; 3696 group_member = true; 3697 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 3698 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 3699 valid_key) { 3700 field->ltype_match |= NPC_LT_LD_SCTP; 3701 group_member = true; 3702 } 3703 field->ltype_mask = ~field->ltype_match; 3704 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 3705 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 3706 /* Handle the case where any of the group item 3707 * is enabled in the group but not the final one 3708 */ 3709 if (group_member) { 3710 valid_key = true; 3711 group_member = false; 3712 } 3713 } else { 3714 field_marker = false; 3715 keyoff_marker = false; 3716 } 3717 3718 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 3719 * remember the TCP key offset of 40 byte hash key. 
3720 */ 3721 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 3722 l4_key_offset = key_off; 3723 break; 3724 case NIX_FLOW_KEY_TYPE_NVGRE: 3725 field->lid = NPC_LID_LD; 3726 field->hdr_offset = 4; /* VSID offset */ 3727 field->bytesm1 = 2; 3728 field->ltype_match = NPC_LT_LD_NVGRE; 3729 field->ltype_mask = 0xF; 3730 break; 3731 case NIX_FLOW_KEY_TYPE_VXLAN: 3732 case NIX_FLOW_KEY_TYPE_GENEVE: 3733 field->lid = NPC_LID_LE; 3734 field->bytesm1 = 2; 3735 field->hdr_offset = 4; 3736 field->ltype_mask = 0xF; 3737 field_marker = false; 3738 keyoff_marker = false; 3739 3740 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 3741 field->ltype_match |= NPC_LT_LE_VXLAN; 3742 group_member = true; 3743 } 3744 3745 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 3746 field->ltype_match |= NPC_LT_LE_GENEVE; 3747 group_member = true; 3748 } 3749 3750 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 3751 if (group_member) { 3752 field->ltype_mask = ~field->ltype_match; 3753 field_marker = true; 3754 keyoff_marker = true; 3755 valid_key = true; 3756 group_member = false; 3757 } 3758 } 3759 break; 3760 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 3761 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 3762 field->lid = NPC_LID_LA; 3763 field->ltype_match = NPC_LT_LA_ETHER; 3764 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 3765 field->lid = NPC_LID_LF; 3766 field->ltype_match = NPC_LT_LF_TU_ETHER; 3767 } 3768 field->hdr_offset = 0; 3769 field->bytesm1 = 5; /* DMAC 6 Byte */ 3770 field->ltype_mask = 0xF; 3771 break; 3772 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 3773 field->lid = NPC_LID_LC; 3774 field->hdr_offset = 40; /* IPV6 hdr */ 3775 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 3776 field->ltype_match = NPC_LT_LC_IP6_EXT; 3777 field->ltype_mask = 0xF; 3778 break; 3779 case NIX_FLOW_KEY_TYPE_GTPU: 3780 field->lid = NPC_LID_LE; 3781 field->hdr_offset = 4; 3782 field->bytesm1 = 3; /* 4 bytes TID*/ 3783 field->ltype_match = NPC_LT_LE_GTPU; 3784 field->ltype_mask = 0xF; 3785 break; 3786 case NIX_FLOW_KEY_TYPE_VLAN: 3787 field->lid = NPC_LID_LB; 3788 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 3789 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3790 field->ltype_match = NPC_LT_LB_CTAG; 3791 field->ltype_mask = 0xF; 3792 field->fn_mask = 1; /* Mask out the first nibble */ 3793 break; 3794 case NIX_FLOW_KEY_TYPE_AH: 3795 case NIX_FLOW_KEY_TYPE_ESP: 3796 field->hdr_offset = 0; 3797 field->bytesm1 = 7; /* SPI + sequence number */ 3798 field->ltype_mask = 0xF; 3799 field->lid = NPC_LID_LE; 3800 field->ltype_match = NPC_LT_LE_ESP; 3801 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3802 field->lid = NPC_LID_LD; 3803 field->ltype_match = NPC_LT_LD_AH; 3804 field->hdr_offset = 4; 3805 keyoff_marker = false; 3806 } 3807 break; 3808 } 3809 field->ena = 1; 3810 3811 /* Found a valid flow key type */ 3812 if (valid_key) { 3813 /* Use the key offset of TCP/UDP/SCTP fields 3814 * for ESP/AH fields. 
3815 */ 3816 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 3817 key_type == NIX_FLOW_KEY_TYPE_AH) 3818 key_off = l4_key_offset; 3819 field->key_offset = key_off; 3820 memcpy(&alg[nr_field], field, sizeof(*field)); 3821 max_key_off = max(max_key_off, field->bytesm1 + 1); 3822 3823 /* Found a field marker, get the next field */ 3824 if (field_marker) 3825 nr_field++; 3826 } 3827 3828 /* Found a keyoff marker, update the new key_off */ 3829 if (keyoff_marker) { 3830 key_off += max_key_off; 3831 max_key_off = 0; 3832 } 3833 } 3834 /* Processed all the flow key types */ 3835 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 3836 return 0; 3837 else 3838 return NIX_AF_ERR_RSS_NOSPC_FIELD; 3839 } 3840 3841 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 3842 { 3843 u64 field[FIELDS_PER_ALG]; 3844 struct nix_hw *hw; 3845 int fid, rc; 3846 3847 hw = get_nix_hw(rvu->hw, blkaddr); 3848 if (!hw) 3849 return NIX_AF_ERR_INVALID_NIXBLK; 3850 3851 /* No room to add new flow hash algoritham */ 3852 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 3853 return NIX_AF_ERR_RSS_NOSPC_ALGO; 3854 3855 /* Generate algo fields for the given flow_cfg */ 3856 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 3857 if (rc) 3858 return rc; 3859 3860 /* Update ALGX_FIELDX register with generated fields */ 3861 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3862 rvu_write64(rvu, blkaddr, 3863 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 3864 fid), field[fid]); 3865 3866 /* Store the flow_cfg for futher lookup */ 3867 rc = hw->flowkey.in_use; 3868 hw->flowkey.flowkey[rc] = flow_cfg; 3869 hw->flowkey.in_use++; 3870 3871 return rc; 3872 } 3873 3874 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 3875 struct nix_rss_flowkey_cfg *req, 3876 struct nix_rss_flowkey_cfg_rsp *rsp) 3877 { 3878 u16 pcifunc = req->hdr.pcifunc; 3879 int alg_idx, nixlf, blkaddr; 3880 struct nix_hw *nix_hw; 3881 int err; 3882 3883 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3884 if (err) 3885 return err; 3886 3887 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3888 if (!nix_hw) 3889 return NIX_AF_ERR_INVALID_NIXBLK; 3890 3891 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 3892 /* Failed to get algo index from the exiting list, reserve new */ 3893 if (alg_idx < 0) { 3894 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 3895 req->flowkey_cfg); 3896 if (alg_idx < 0) 3897 return alg_idx; 3898 } 3899 rsp->alg_idx = alg_idx; 3900 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 3901 alg_idx, req->mcam_index); 3902 return 0; 3903 } 3904 3905 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 3906 { 3907 u32 flowkey_cfg, minkey_cfg; 3908 int alg, fid, rc; 3909 3910 /* Disable all flow key algx fieldx */ 3911 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 3912 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3913 rvu_write64(rvu, blkaddr, 3914 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 3915 0); 3916 } 3917 3918 /* IPv4/IPv6 SIP/DIPs */ 3919 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 3920 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3921 if (rc < 0) 3922 return rc; 3923 3924 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3925 minkey_cfg = flowkey_cfg; 3926 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 3927 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3928 if (rc < 0) 3929 return rc; 3930 3931 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3932 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 3933 rc = 
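/* nix_rx_flowkey_alg_cfg() reserves a fixed set of default RSS algorithms
 * in order, so (assuming the flowkey table starts out empty) the indices
 * end up as:
 *
 *	0: IPv4/IPv6 2-tuple (SIP, DIP)
 *	1: + TCP ports     2: + UDP ports     3: + SCTP ports
 *	4: + TCP|UDP       5: + TCP|SCTP      6: + UDP|SCTP
 *	7: + TCP|UDP|SCTP
 *
 * where every entry from 1 onwards also hashes the IP 2-tuple. A PF/VF
 * asking for one of these combinations through
 * rvu_mbox_handler_nix_rss_flowkey_cfg() gets the existing index back;
 * any other combination reserves a new slot until NIX_FLOW_KEY_ALG_MAX
 * is reached.
 */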
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3934 if (rc < 0) 3935 return rc; 3936 3937 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3938 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3939 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3940 if (rc < 0) 3941 return rc; 3942 3943 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3944 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3945 NIX_FLOW_KEY_TYPE_UDP; 3946 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3947 if (rc < 0) 3948 return rc; 3949 3950 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3951 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3952 NIX_FLOW_KEY_TYPE_SCTP; 3953 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3954 if (rc < 0) 3955 return rc; 3956 3957 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3958 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3959 NIX_FLOW_KEY_TYPE_SCTP; 3960 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3961 if (rc < 0) 3962 return rc; 3963 3964 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3965 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3966 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3967 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3968 if (rc < 0) 3969 return rc; 3970 3971 return 0; 3972 } 3973 3974 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3975 struct nix_set_mac_addr *req, 3976 struct msg_rsp *rsp) 3977 { 3978 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3979 u16 pcifunc = req->hdr.pcifunc; 3980 int blkaddr, nixlf, err; 3981 struct rvu_pfvf *pfvf; 3982 3983 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3984 if (err) 3985 return err; 3986 3987 pfvf = rvu_get_pfvf(rvu, pcifunc); 3988 3989 /* untrusted VF can't overwrite admin(PF) changes */ 3990 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3991 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3992 dev_warn(rvu->dev, 3993 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3994 return -EPERM; 3995 } 3996 3997 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3998 3999 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4000 pfvf->rx_chan_base, req->mac_addr); 4001 4002 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4003 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4004 4005 rvu_switch_update_rules(rvu, pcifunc); 4006 4007 return 0; 4008 } 4009 4010 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4011 struct msg_req *req, 4012 struct nix_get_mac_addr_rsp *rsp) 4013 { 4014 u16 pcifunc = req->hdr.pcifunc; 4015 struct rvu_pfvf *pfvf; 4016 4017 if (!is_nixlf_attached(rvu, pcifunc)) 4018 return NIX_AF_ERR_AF_LF_INVALID; 4019 4020 pfvf = rvu_get_pfvf(rvu, pcifunc); 4021 4022 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4023 4024 return 0; 4025 } 4026 4027 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4028 struct msg_rsp *rsp) 4029 { 4030 bool allmulti, promisc, nix_rx_multicast; 4031 u16 pcifunc = req->hdr.pcifunc; 4032 struct rvu_pfvf *pfvf; 4033 int nixlf, err; 4034 4035 pfvf = rvu_get_pfvf(rvu, pcifunc); 4036 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4037 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4038 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4039 4040 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4041 4042 if (is_vf(pcifunc) && !nix_rx_multicast && 4043 (promisc || allmulti)) { 4044 dev_warn_ratelimited(rvu->dev, 4045 "VF promisc/multicast not supported\n"); 4046 return 0; 4047 } 4048 4049 /* untrusted VF can't configure promisc/allmulti */ 4050 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4051 (promisc || allmulti)) 4052 return 0; 4053 4054 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4055 if (err) 4056 return err; 4057 4058 if (nix_rx_multicast) { 4059 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4060 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4061 allmulti); 4062 if (err) { 4063 dev_err(rvu->dev, 4064 "Failed to update pcifunc 0x%x to multicast list\n", 4065 pcifunc); 4066 return err; 4067 } 4068 4069 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4070 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4071 promisc); 4072 if (err) { 4073 dev_err(rvu->dev, 4074 "Failed to update pcifunc 0x%x to promisc list\n", 4075 pcifunc); 4076 return err; 4077 } 4078 } 4079 4080 /* install/uninstall allmulti entry */ 4081 if (allmulti) { 4082 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4083 pfvf->rx_chan_base); 4084 } else { 4085 if (!nix_rx_multicast) 4086 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4087 } 4088 4089 /* install/uninstall promisc entry */ 4090 if (promisc) 4091 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4092 pfvf->rx_chan_base, 4093 pfvf->rx_chan_cnt); 4094 else 4095 if (!nix_rx_multicast) 4096 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4097 4098 return 0; 4099 } 4100 4101 static void nix_find_link_frs(struct rvu *rvu, 4102 struct nix_frs_cfg *req, u16 pcifunc) 4103 { 4104 int pf = rvu_get_pf(pcifunc); 4105 struct rvu_pfvf *pfvf; 4106 int maxlen, minlen; 4107 int numvfs, hwvf; 4108 int vf; 4109 4110 /* Update with requester's min/max lengths */ 4111 pfvf = rvu_get_pfvf(rvu, pcifunc); 4112 pfvf->maxlen = req->maxlen; 4113 if (req->update_minlen) 4114 pfvf->minlen = req->minlen; 4115 4116 maxlen = req->maxlen; 4117 minlen = req->update_minlen ? 
req->minlen : 0; 4118 4119 /* Get this PF's numVFs and starting hwvf */ 4120 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4121 4122 /* For each VF, compare requested max/minlen */ 4123 for (vf = 0; vf < numvfs; vf++) { 4124 pfvf = &rvu->hwvf[hwvf + vf]; 4125 if (pfvf->maxlen > maxlen) 4126 maxlen = pfvf->maxlen; 4127 if (req->update_minlen && 4128 pfvf->minlen && pfvf->minlen < minlen) 4129 minlen = pfvf->minlen; 4130 } 4131 4132 /* Compare requested max/minlen with PF's max/minlen */ 4133 pfvf = &rvu->pf[pf]; 4134 if (pfvf->maxlen > maxlen) 4135 maxlen = pfvf->maxlen; 4136 if (req->update_minlen && 4137 pfvf->minlen && pfvf->minlen < minlen) 4138 minlen = pfvf->minlen; 4139 4140 /* Update the request with max/min PF's and it's VF's max/min */ 4141 req->maxlen = maxlen; 4142 if (req->update_minlen) 4143 req->minlen = minlen; 4144 } 4145 4146 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4147 struct msg_rsp *rsp) 4148 { 4149 struct rvu_hwinfo *hw = rvu->hw; 4150 u16 pcifunc = req->hdr.pcifunc; 4151 int pf = rvu_get_pf(pcifunc); 4152 int blkaddr, link = -1; 4153 struct nix_hw *nix_hw; 4154 struct rvu_pfvf *pfvf; 4155 u8 cgx = 0, lmac = 0; 4156 u16 max_mtu; 4157 u64 cfg; 4158 4159 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4160 if (blkaddr < 0) 4161 return NIX_AF_ERR_AF_LF_INVALID; 4162 4163 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4164 if (!nix_hw) 4165 return NIX_AF_ERR_INVALID_NIXBLK; 4166 4167 if (is_afvf(pcifunc)) 4168 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4169 else 4170 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4171 4172 if (!req->sdp_link && req->maxlen > max_mtu) 4173 return NIX_AF_ERR_FRS_INVALID; 4174 4175 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4176 return NIX_AF_ERR_FRS_INVALID; 4177 4178 /* Check if config is for SDP link */ 4179 if (req->sdp_link) { 4180 if (!hw->sdp_links) 4181 return NIX_AF_ERR_RX_LINK_INVALID; 4182 link = hw->cgx_links + hw->lbk_links; 4183 goto linkcfg; 4184 } 4185 4186 /* Check if the request is from CGX mapped RVU PF */ 4187 if (is_pf_cgxmapped(rvu, pf)) { 4188 /* Get CGX and LMAC to which this PF is mapped and find link */ 4189 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4190 link = (cgx * hw->lmac_per_cgx) + lmac; 4191 } else if (pf == 0) { 4192 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4193 pfvf = rvu_get_pfvf(rvu, pcifunc); 4194 link = hw->cgx_links + pfvf->lbkid; 4195 } 4196 4197 if (link < 0) 4198 return NIX_AF_ERR_RX_LINK_INVALID; 4199 4200 linkcfg: 4201 nix_find_link_frs(rvu, req, pcifunc); 4202 4203 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4204 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4205 if (req->update_minlen) 4206 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4207 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4208 4209 return 0; 4210 } 4211 4212 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4213 struct msg_rsp *rsp) 4214 { 4215 int nixlf, blkaddr, err; 4216 u64 cfg; 4217 4218 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4219 if (err) 4220 return err; 4221 4222 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4223 /* Set the interface configuration */ 4224 if (req->len_verify & BIT(0)) 4225 cfg |= BIT_ULL(41); 4226 else 4227 cfg &= ~BIT_ULL(41); 4228 4229 if (req->len_verify & BIT(1)) 4230 cfg |= BIT_ULL(40); 4231 else 4232 cfg &= ~BIT_ULL(40); 4233 4234 if (req->len_verify & NIX_RX_DROP_RE) 4235 cfg |= BIT_ULL(32); 4236 else 4237 cfg &= ~BIT_ULL(32); 
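	/* The bit mapping applied by this handler (taken from the code around
	 * this point): len_verify BIT(0) -> CFG bit 41, len_verify BIT(1) ->
	 * CFG bit 40, NIX_RX_DROP_RE -> CFG bit 32, csum_verify BIT(0) ->
	 * CFG bit 37.
	 *
	 * Illustrative PF/VF-side sketch only, assuming the usual
	 * otx2_mbox_alloc_msg_nix_set_rx_cfg() helper generated from the
	 * mbox message table:
	 *
	 *	req = otx2_mbox_alloc_msg_nix_set_rx_cfg(&pf->mbox);
	 *	req->len_verify = BIT(0) | BIT(1) | NIX_RX_DROP_RE;
	 *	req->csum_verify = BIT(0);
	 */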

	if (req->csum_verify & BIT(0))
		cfg |= BIT_ULL(37);
	else
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);

	return 0;
}

static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}

static void nix_link_config(struct rvu *rvu, int blkaddr,
			    struct nix_hw *nix_hw)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;
	u16 lbk_max_frs, lmac_max_frs;
	unsigned long lmac_bmap;
	u64 tx_credits, cfg;
	u64 lmac_fifo_len;
	int iter;

	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With the HW reset minlen value of 60 bytes, HW would treat ARP pkts
	 * as undersized and report them to SW as error pkts, hence set the
	 * minlen to 40 bytes.
	 */
	for (link = 0; link < hw->cgx_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
	}

	/* LBK links are numbered right after the CGX links */
	for (link = hw->cgx_links;
	     link < hw->cgx_links + hw->lbk_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
	}
	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	/* Get MCS external bypass status for CN10K-B */
	if (mcs_get_blkcnt() == 1) {
		/* Adjust for 2 credits when external bypass is disabled */
		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
	}

	/* Set credits for Tx links assuming max packet length allowed.
	 * This will be reconfigured based on MTU set for PF/VF.
4297 */ 4298 for (cgx = 0; cgx < hw->cgx; cgx++) { 4299 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4300 /* Skip when cgx is not available or lmac cnt is zero */ 4301 if (lmac_cnt <= 0) 4302 continue; 4303 slink = cgx * hw->lmac_per_cgx; 4304 4305 /* Get LMAC id's from bitmap */ 4306 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4307 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4308 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4309 if (!lmac_fifo_len) { 4310 dev_err(rvu->dev, 4311 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4312 __func__, cgx, iter); 4313 continue; 4314 } 4315 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4316 /* Enable credits and set credit pkt count to max allowed */ 4317 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4318 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4319 4320 link = iter + slink; 4321 nix_hw->tx_credits[link] = tx_credits; 4322 rvu_write64(rvu, blkaddr, 4323 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4324 } 4325 } 4326 4327 /* Set Tx credits for LBK link */ 4328 slink = hw->cgx_links; 4329 for (link = slink; link < (slink + hw->lbk_links); link++) { 4330 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4331 nix_hw->tx_credits[link] = tx_credits; 4332 /* Enable credits and set credit pkt count to max allowed */ 4333 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4334 rvu_write64(rvu, blkaddr, 4335 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4336 } 4337 } 4338 4339 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4340 { 4341 int idx, err; 4342 u64 status; 4343 4344 /* Start X2P bus calibration */ 4345 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4346 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4347 /* Wait for calibration to complete */ 4348 err = rvu_poll_reg(rvu, blkaddr, 4349 NIX_AF_STATUS, BIT_ULL(10), false); 4350 if (err) { 4351 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4352 return err; 4353 } 4354 4355 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4356 /* Check if CGX devices are ready */ 4357 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4358 /* Skip when cgx port is not available */ 4359 if (!rvu_cgx_pdata(idx, rvu) || 4360 (status & (BIT_ULL(16 + idx)))) 4361 continue; 4362 dev_err(rvu->dev, 4363 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4364 err = -EBUSY; 4365 } 4366 4367 /* Check if LBK is ready */ 4368 if (!(status & BIT_ULL(19))) { 4369 dev_err(rvu->dev, 4370 "LBK didn't respond to NIX X2P calibration\n"); 4371 err = -EBUSY; 4372 } 4373 4374 /* Clear 'calibrate_x2p' bit */ 4375 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4376 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4377 if (err || (status & 0x3FFULL)) 4378 dev_err(rvu->dev, 4379 "NIX X2P calibration failed, status 0x%llx\n", status); 4380 if (err) 4381 return err; 4382 return 0; 4383 } 4384 4385 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4386 { 4387 u64 cfg; 4388 int err; 4389 4390 /* Set admin queue endianness */ 4391 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4392 #ifdef __BIG_ENDIAN 4393 cfg |= BIT_ULL(8); 4394 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4395 #else 4396 cfg &= ~BIT_ULL(8); 4397 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4398 #endif 4399 4400 /* Do not bypass NDC cache */ 4401 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4402 cfg &= ~0x3FFEULL; 4403 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4404 /* Disable caching of SQB aka SQEs */ 4405 cfg |= 0x04ULL; 4406 #endif 4407 
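	/* Decoding the magic numbers above: ~0x3FFEULL clears bits [13:1] of
	 * NIX_AF_NDC_CFG (the NDC bypass controls, so NDC caching stays
	 * enabled), and the optional 0x04ULL sets bit 2 to disable SQB/SQE
	 * caching when CONFIG_NDC_DIS_DYNAMIC_CACHING is set. Derived from
	 * the masks used here, not restated from the HRM.
	 */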
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 hw_const;

	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* On OcteonTx2 the DWRR quantum is directly configured into each of
	 * the transmit scheduler queues, and PF/VF drivers were free to
	 * configure any value up to 2^24.
	 * On CN10K the HW is modified: the quantum configuration at the
	 * scheduler queues is in terms of weight, and SW needs to set up a
	 * base DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will
	 * do 'DWRR MTU * weight' to get the quantum.
	 *
	 * Check if HW uses a common MTU for all DWRR quantum configs.
	 * On OcteonTx2 this register field is '0'.
	 */
	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
		hw->cap.nix_common_dwrr_mtu = true;

	if (hw_const & BIT_ULL(61))
		hw->cap.nix_multiple_dwrr_mtu = true;
}

static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
	}

	/* Set chan/link to backpressure TL3 instead of TL2 */
	rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

	/* Disable SQ manager's sticky mode operation (set TM6 = 0).
	 * This sticky mode is known to cause SQ stalls when multiple SQs are
	 * mapped to the same SMQ and are transmitting packets at the same
	 * time.
4476 */ 4477 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4478 cfg &= ~BIT_ULL(15); 4479 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4480 4481 ltdefs = rvu->kpu.lt_def; 4482 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4483 err = nix_calibrate_x2p(rvu, blkaddr); 4484 if (err) 4485 return err; 4486 4487 /* Setup capabilities of the NIX block */ 4488 rvu_nix_setup_capabilities(rvu, blkaddr); 4489 4490 /* Initialize admin queue */ 4491 err = nix_aq_init(rvu, block); 4492 if (err) 4493 return err; 4494 4495 /* Restore CINT timer delay to HW reset values */ 4496 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4497 4498 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4499 4500 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4501 cfg |= 1ULL; 4502 if (!is_rvu_otx2(rvu)) 4503 cfg |= NIX_PTP_1STEP_EN; 4504 4505 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4506 4507 if (!is_rvu_otx2(rvu)) 4508 rvu_nix_block_cn10k_init(rvu, nix_hw); 4509 4510 if (is_block_implemented(hw, blkaddr)) { 4511 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4512 if (err) 4513 return err; 4514 4515 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4516 if (err) 4517 return err; 4518 4519 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4520 if (err) 4521 return err; 4522 4523 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4524 if (err) 4525 return err; 4526 4527 err = nix_setup_txvlan(rvu, nix_hw); 4528 if (err) 4529 return err; 4530 4531 /* Configure segmentation offload formats */ 4532 nix_setup_lso(rvu, nix_hw, blkaddr); 4533 4534 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4535 * This helps HW protocol checker to identify headers 4536 * and validate length and checksums. 4537 */ 4538 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4539 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4540 ltdefs->rx_ol2.ltype_mask); 4541 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4542 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4543 ltdefs->rx_oip4.ltype_mask); 4544 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4545 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4546 ltdefs->rx_iip4.ltype_mask); 4547 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4548 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4549 ltdefs->rx_oip6.ltype_mask); 4550 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4551 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4552 ltdefs->rx_iip6.ltype_mask); 4553 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4554 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4555 ltdefs->rx_otcp.ltype_mask); 4556 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4557 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4558 ltdefs->rx_itcp.ltype_mask); 4559 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4560 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4561 ltdefs->rx_oudp.ltype_mask); 4562 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4563 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4564 ltdefs->rx_iudp.ltype_mask); 4565 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4566 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4567 ltdefs->rx_osctp.ltype_mask); 4568 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4569 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4570 ltdefs->rx_isctp.ltype_mask); 4571 4572 if (!is_rvu_otx2(rvu)) { 4573 /* Enable APAD 
		   calculation for other protocols
		 * matching APAD0 and APAD1 lt def registers.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
			    (ltdefs->rx_apad0.valid << 11) |
			    (ltdefs->rx_apad0.lid << 8) |
			    (ltdefs->rx_apad0.ltype_match << 4) |
			    ltdefs->rx_apad0.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
			    (ltdefs->rx_apad1.valid << 11) |
			    (ltdefs->rx_apad1.lid << 8) |
			    (ltdefs->rx_apad1.ltype_match << 4) |
			    ltdefs->rx_apad1.ltype_mask);

		/* Receive ethertype definition register defines layer
		 * information in NPC_RESULT_S to identify the Ethertype
		 * location in L2 header. Used for Ethertype overwriting
		 * in inline IPsec flow.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
			    (ltdefs->rx_et[0].offset << 12) |
			    (ltdefs->rx_et[0].valid << 11) |
			    (ltdefs->rx_et[0].lid << 8) |
			    (ltdefs->rx_et[0].ltype_match << 4) |
			    ltdefs->rx_et[0].ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
			    (ltdefs->rx_et[1].offset << 12) |
			    (ltdefs->rx_et[1].valid << 11) |
			    (ltdefs->rx_et[1].lid << 8) |
			    (ltdefs->rx_et[1].ltype_match << 4) |
			    ltdefs->rx_et[1].ltype_mask);
		}

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
		if (!nix_hw->tx_credits)
			return -ENOMEM;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr, nix_hw);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
4697 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4698 } 4699 } 4700 4701 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 4702 struct msg_rsp *rsp) 4703 { 4704 u16 pcifunc = req->hdr.pcifunc; 4705 struct rvu_pfvf *pfvf; 4706 int nixlf, err; 4707 4708 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4709 if (err) 4710 return err; 4711 4712 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 4713 4714 npc_mcam_enable_flows(rvu, pcifunc); 4715 4716 pfvf = rvu_get_pfvf(rvu, pcifunc); 4717 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 4718 4719 rvu_switch_update_rules(rvu, pcifunc); 4720 4721 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 4722 } 4723 4724 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 4725 struct msg_rsp *rsp) 4726 { 4727 u16 pcifunc = req->hdr.pcifunc; 4728 struct rvu_pfvf *pfvf; 4729 int nixlf, err; 4730 4731 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4732 if (err) 4733 return err; 4734 4735 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4736 4737 pfvf = rvu_get_pfvf(rvu, pcifunc); 4738 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4739 4740 err = rvu_cgx_start_stop_io(rvu, pcifunc, false); 4741 if (err) 4742 return err; 4743 4744 rvu_cgx_tx_enable(rvu, pcifunc, true); 4745 4746 return 0; 4747 } 4748 4749 #define RX_SA_BASE GENMASK_ULL(52, 7) 4750 4751 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 4752 { 4753 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 4754 struct hwctx_disable_req ctx_req; 4755 int pf = rvu_get_pf(pcifunc); 4756 struct mac_ops *mac_ops; 4757 u8 cgx_id, lmac_id; 4758 u64 sa_base; 4759 void *cgxd; 4760 int err; 4761 4762 ctx_req.hdr.pcifunc = pcifunc; 4763 4764 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 4765 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4766 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 4767 nix_interface_deinit(rvu, pcifunc, nixlf); 4768 nix_rx_sync(rvu, blkaddr); 4769 nix_txschq_free(rvu, pcifunc); 4770 4771 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4772 4773 rvu_cgx_start_stop_io(rvu, pcifunc, false); 4774 4775 if (pfvf->sq_ctx) { 4776 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 4777 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4778 if (err) 4779 dev_err(rvu->dev, "SQ ctx disable failed\n"); 4780 } 4781 4782 if (pfvf->rq_ctx) { 4783 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 4784 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4785 if (err) 4786 dev_err(rvu->dev, "RQ ctx disable failed\n"); 4787 } 4788 4789 if (pfvf->cq_ctx) { 4790 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 4791 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4792 if (err) 4793 dev_err(rvu->dev, "CQ ctx disable failed\n"); 4794 } 4795 4796 /* reset HW config done for Switch headers */ 4797 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 4798 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 4799 4800 /* Disabling CGX and NPC config done for PTP */ 4801 if (pfvf->hw_rx_tstamp_en) { 4802 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4803 cgxd = rvu_cgx_pdata(cgx_id, rvu); 4804 mac_ops = get_mac_ops(cgxd); 4805 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 4806 /* Undo NPC config done for PTP */ 4807 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 4808 dev_err(rvu->dev, "NPC config for PTP failed\n"); 4809 pfvf->hw_rx_tstamp_en = false; 4810 } 4811 4812 /* reset priority flow control config */ 4813 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 4814 4815 /* reset 802.3x flow control config */ 4816 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 
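	/* Remaining teardown steps: free this LF's context memory, release
	 * any bandwidth profiles owned by this PCIFUNC and, if an inline
	 * IPsec SA base was programmed (non-zero RX_SA_BASE bits [52:7] of
	 * NIX_AF_LFX_RX_IPSEC_SA_BASE), flush the corresponding CPT
	 * contexts.
	 */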
4817 4818 nix_ctx_free(rvu, pfvf); 4819 4820 nix_free_all_bandprof(rvu, pcifunc); 4821 4822 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 4823 if (FIELD_GET(RX_SA_BASE, sa_base)) { 4824 err = rvu_cpt_ctx_flush(rvu, pcifunc); 4825 if (err) 4826 dev_err(rvu->dev, 4827 "CPT ctx flush failed with error: %d\n", err); 4828 } 4829 } 4830 4831 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 4832 4833 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 4834 { 4835 struct rvu_hwinfo *hw = rvu->hw; 4836 struct rvu_block *block; 4837 int blkaddr, pf; 4838 int nixlf; 4839 u64 cfg; 4840 4841 pf = rvu_get_pf(pcifunc); 4842 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 4843 return 0; 4844 4845 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4846 if (blkaddr < 0) 4847 return NIX_AF_ERR_AF_LF_INVALID; 4848 4849 block = &hw->block[blkaddr]; 4850 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 4851 if (nixlf < 0) 4852 return NIX_AF_ERR_AF_LF_INVALID; 4853 4854 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 4855 4856 if (enable) 4857 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 4858 else 4859 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 4860 4861 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 4862 4863 return 0; 4864 } 4865 4866 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 4867 struct msg_rsp *rsp) 4868 { 4869 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 4870 } 4871 4872 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 4873 struct msg_rsp *rsp) 4874 { 4875 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4876 } 4877 4878 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4879 struct nix_lso_format_cfg *req, 4880 struct nix_lso_format_cfg_rsp *rsp) 4881 { 4882 u16 pcifunc = req->hdr.pcifunc; 4883 struct nix_hw *nix_hw; 4884 struct rvu_pfvf *pfvf; 4885 int blkaddr, idx, f; 4886 u64 reg; 4887 4888 pfvf = rvu_get_pfvf(rvu, pcifunc); 4889 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4890 if (!pfvf->nixlf || blkaddr < 0) 4891 return NIX_AF_ERR_AF_LF_INVALID; 4892 4893 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4894 if (!nix_hw) 4895 return NIX_AF_ERR_INVALID_NIXBLK; 4896 4897 /* Find existing matching LSO format, if any */ 4898 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4899 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4900 reg = rvu_read64(rvu, blkaddr, 4901 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 4902 if (req->fields[f] != (reg & req->field_mask)) 4903 break; 4904 } 4905 4906 if (f == NIX_LSO_FIELD_MAX) 4907 break; 4908 } 4909 4910 if (idx < nix_hw->lso.in_use) { 4911 /* Match found */ 4912 rsp->lso_format_idx = idx; 4913 return 0; 4914 } 4915 4916 if (nix_hw->lso.in_use == nix_hw->lso.total) 4917 return NIX_AF_ERR_LSO_CFG_FAIL; 4918 4919 rsp->lso_format_idx = nix_hw->lso.in_use++; 4920 4921 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 4922 rvu_write64(rvu, blkaddr, 4923 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 4924 req->fields[f]); 4925 4926 return 0; 4927 } 4928 4929 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 4930 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 4931 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 4932 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 4933 4934 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 4935 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 4936 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 4937 4938 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 4939 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 4940 
#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 4941 4942 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 4943 int blkaddr) 4944 { 4945 u8 cpt_idx, cpt_blkaddr; 4946 u64 val; 4947 4948 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 4949 if (req->enable) { 4950 val = 0; 4951 /* Enable context prefetching */ 4952 if (!is_rvu_otx2(rvu)) 4953 val |= BIT_ULL(51); 4954 4955 /* Set OPCODE and EGRP */ 4956 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 4957 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 4958 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 4959 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 4960 4961 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 4962 4963 /* Set CPT queue for inline IPSec */ 4964 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 4965 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 4966 req->inst_qsel.cpt_pf_func); 4967 4968 if (!is_rvu_otx2(rvu)) { 4969 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : 4970 BLKADDR_CPT1; 4971 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 4972 } 4973 4974 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 4975 val); 4976 4977 /* Set CPT credit */ 4978 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 4979 if ((val & 0x3FFFFF) != 0x3FFFFF) 4980 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 4981 0x3FFFFF - val); 4982 4983 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 4984 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 4985 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 4986 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 4987 } else { 4988 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 4989 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 4990 0x0); 4991 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 4992 if ((val & 0x3FFFFF) != 0x3FFFFF) 4993 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 4994 0x3FFFFF - val); 4995 } 4996 } 4997 4998 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 4999 struct nix_inline_ipsec_cfg *req, 5000 struct msg_rsp *rsp) 5001 { 5002 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5003 return 0; 5004 5005 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5006 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5007 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5008 5009 return 0; 5010 } 5011 5012 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5013 struct msg_req *req, 5014 struct nix_inline_ipsec_cfg *rsp) 5015 5016 { 5017 u64 val; 5018 5019 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5020 return 0; 5021 5022 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5023 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5024 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5025 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5026 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5027 5028 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5029 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5030 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5031 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5032 5033 return 0; 5034 } 5035 5036 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5037 struct nix_inline_ipsec_lf_cfg *req, 5038 struct msg_rsp *rsp) 5039 { 5040 int lf, blkaddr, err; 5041 u64 val; 5042 5043 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5044 return 0; 5045 
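	/* For reference, the per-LF registers programmed below pack the mbox
	 * fields as follows (mirroring the shifts in the code, not restated
	 * from the HRM):
	 *
	 *   NIX_AF_LFX_RX_IPSEC_CFG0: tt << 44 | tag_const << 20 |
	 *                             sa_pow2_size << 16 | lenm1_max
	 *                             (plus BIT_ULL(46) on NIX1)
	 *   NIX_AF_LFX_RX_IPSEC_CFG1: sa_idx_w << 32 | sa_idx_max
	 *   NIX_AF_LFX_RX_IPSEC_SA_BASE: sa_base_addr
	 */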
5046 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5047 if (err) 5048 return err; 5049 5050 if (req->enable) { 5051 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5052 val = (u64)req->ipsec_cfg0.tt << 44 | 5053 (u64)req->ipsec_cfg0.tag_const << 20 | 5054 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5055 req->ipsec_cfg0.lenm1_max; 5056 5057 if (blkaddr == BLKADDR_NIX1) 5058 val |= BIT_ULL(46); 5059 5060 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5061 5062 /* Set SA_IDX_W and SA_IDX_MAX */ 5063 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5064 req->ipsec_cfg1.sa_idx_max; 5065 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5066 5067 /* Set SA base address */ 5068 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5069 req->sa_base_addr); 5070 } else { 5071 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5072 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5073 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5074 0x0); 5075 } 5076 5077 return 0; 5078 } 5079 5080 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5081 { 5082 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5083 5084 /* overwrite vf mac address with default_mac */ 5085 if (from_vf) 5086 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5087 } 5088 5089 /* NIX ingress policers or bandwidth profiles APIs */ 5090 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5091 { 5092 struct npc_lt_def_cfg defs, *ltdefs; 5093 5094 ltdefs = &defs; 5095 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5096 5097 /* Extract PCP and DEI fields from outer VLAN from byte offset 5098 * 2 from the start of LB_PTR (ie TAG). 5099 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5100 * fields are considered when 'Tunnel enable' is set in profile. 
5101 */ 5102 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5103 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5104 (ltdefs->ovlan.ltype_match << 4) | 5105 ltdefs->ovlan.ltype_mask); 5106 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5107 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5108 (ltdefs->ivlan.ltype_match << 4) | 5109 ltdefs->ivlan.ltype_mask); 5110 5111 /* DSCP field in outer and tunneled IPv4 packets */ 5112 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5113 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5114 (ltdefs->rx_oip4.ltype_match << 4) | 5115 ltdefs->rx_oip4.ltype_mask); 5116 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5117 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5118 (ltdefs->rx_iip4.ltype_match << 4) | 5119 ltdefs->rx_iip4.ltype_mask); 5120 5121 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5122 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5123 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5124 (ltdefs->rx_oip6.ltype_match << 4) | 5125 ltdefs->rx_oip6.ltype_mask); 5126 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5127 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5128 (ltdefs->rx_iip6.ltype_match << 4) | 5129 ltdefs->rx_iip6.ltype_mask); 5130 } 5131 5132 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5133 int layer, int prof_idx) 5134 { 5135 struct nix_cn10k_aq_enq_req aq_req; 5136 int rc; 5137 5138 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5139 5140 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5141 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5142 aq_req.op = NIX_AQ_INSTOP_INIT; 5143 5144 /* Context is all zeros, submit to AQ */ 5145 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5146 (struct nix_aq_enq_req *)&aq_req, NULL); 5147 if (rc) 5148 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5149 layer, prof_idx); 5150 return rc; 5151 } 5152 5153 static int nix_setup_ipolicers(struct rvu *rvu, 5154 struct nix_hw *nix_hw, int blkaddr) 5155 { 5156 struct rvu_hwinfo *hw = rvu->hw; 5157 struct nix_ipolicer *ipolicer; 5158 int err, layer, prof_idx; 5159 u64 cfg; 5160 5161 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5162 if (!(cfg & BIT_ULL(61))) { 5163 hw->cap.ipolicer = false; 5164 return 0; 5165 } 5166 5167 hw->cap.ipolicer = true; 5168 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5169 sizeof(*ipolicer), GFP_KERNEL); 5170 if (!nix_hw->ipolicer) 5171 return -ENOMEM; 5172 5173 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5174 5175 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5176 ipolicer = &nix_hw->ipolicer[layer]; 5177 switch (layer) { 5178 case BAND_PROF_LEAF_LAYER: 5179 ipolicer->band_prof.max = cfg & 0XFFFF; 5180 break; 5181 case BAND_PROF_MID_LAYER: 5182 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5183 break; 5184 case BAND_PROF_TOP_LAYER: 5185 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5186 break; 5187 } 5188 5189 if (!ipolicer->band_prof.max) 5190 continue; 5191 5192 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5193 if (err) 5194 return err; 5195 5196 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5197 ipolicer->band_prof.max, 5198 sizeof(u16), GFP_KERNEL); 5199 if (!ipolicer->pfvf_map) 5200 return -ENOMEM; 5201 5202 ipolicer->match_id = devm_kcalloc(rvu->dev, 5203 ipolicer->band_prof.max, 5204 sizeof(u16), GFP_KERNEL); 5205 if (!ipolicer->match_id) 5206 return -ENOMEM; 5207 5208 for (prof_idx = 0; 5209 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5210 /* Set AF as current owner for INIT ops to succeed */ 
5211 ipolicer->pfvf_map[prof_idx] = 0x00; 5212 5213 /* There is no enable bit in the profile context, 5214 * so no context disable. So let's INIT them here 5215 * so that PF/VF later on have to just do WRITE to 5216 * setup policer rates and config. 5217 */ 5218 err = nix_init_policer_context(rvu, nix_hw, 5219 layer, prof_idx); 5220 if (err) 5221 return err; 5222 } 5223 5224 /* Allocate memory for maintaining ref_counts for MID level 5225 * profiles, this will be needed for leaf layer profiles' 5226 * aggregation. 5227 */ 5228 if (layer != BAND_PROF_MID_LAYER) 5229 continue; 5230 5231 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5232 ipolicer->band_prof.max, 5233 sizeof(u16), GFP_KERNEL); 5234 if (!ipolicer->ref_count) 5235 return -ENOMEM; 5236 } 5237 5238 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5239 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5240 5241 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5242 5243 return 0; 5244 } 5245 5246 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5247 { 5248 struct nix_ipolicer *ipolicer; 5249 int layer; 5250 5251 if (!rvu->hw->cap.ipolicer) 5252 return; 5253 5254 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5255 ipolicer = &nix_hw->ipolicer[layer]; 5256 5257 if (!ipolicer->band_prof.max) 5258 continue; 5259 5260 kfree(ipolicer->band_prof.bmap); 5261 } 5262 } 5263 5264 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5265 struct nix_hw *nix_hw, u16 pcifunc) 5266 { 5267 struct nix_ipolicer *ipolicer; 5268 int layer, hi_layer, prof_idx; 5269 5270 /* Bits [15:14] in profile index represent layer */ 5271 layer = (req->qidx >> 14) & 0x03; 5272 prof_idx = req->qidx & 0x3FFF; 5273 5274 ipolicer = &nix_hw->ipolicer[layer]; 5275 if (prof_idx >= ipolicer->band_prof.max) 5276 return -EINVAL; 5277 5278 /* Check if the profile is allocated to the requesting PCIFUNC or not 5279 * with the exception of AF. AF is allowed to read and update contexts. 5280 */ 5281 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5282 return -EINVAL; 5283 5284 /* If this profile is linked to higher layer profile then check 5285 * if that profile is also allocated to the requesting PCIFUNC 5286 * or not. 5287 */ 5288 if (!req->prof.hl_en) 5289 return 0; 5290 5291 /* Leaf layer profile can link only to mid layer and 5292 * mid layer to top layer. 
5293 */ 5294 if (layer == BAND_PROF_LEAF_LAYER) 5295 hi_layer = BAND_PROF_MID_LAYER; 5296 else if (layer == BAND_PROF_MID_LAYER) 5297 hi_layer = BAND_PROF_TOP_LAYER; 5298 else 5299 return -EINVAL; 5300 5301 ipolicer = &nix_hw->ipolicer[hi_layer]; 5302 prof_idx = req->prof.band_prof_id; 5303 if (prof_idx >= ipolicer->band_prof.max || 5304 ipolicer->pfvf_map[prof_idx] != pcifunc) 5305 return -EINVAL; 5306 5307 return 0; 5308 } 5309 5310 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 5311 struct nix_bandprof_alloc_req *req, 5312 struct nix_bandprof_alloc_rsp *rsp) 5313 { 5314 int blkaddr, layer, prof, idx, err; 5315 u16 pcifunc = req->hdr.pcifunc; 5316 struct nix_ipolicer *ipolicer; 5317 struct nix_hw *nix_hw; 5318 5319 if (!rvu->hw->cap.ipolicer) 5320 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5321 5322 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5323 if (err) 5324 return err; 5325 5326 mutex_lock(&rvu->rsrc_lock); 5327 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5328 if (layer == BAND_PROF_INVAL_LAYER) 5329 continue; 5330 if (!req->prof_count[layer]) 5331 continue; 5332 5333 ipolicer = &nix_hw->ipolicer[layer]; 5334 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5335 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 5336 if (idx == MAX_BANDPROF_PER_PFFUNC) 5337 break; 5338 5339 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5340 if (prof < 0) 5341 break; 5342 rsp->prof_count[layer]++; 5343 rsp->prof_idx[layer][idx] = prof; 5344 ipolicer->pfvf_map[prof] = pcifunc; 5345 } 5346 } 5347 mutex_unlock(&rvu->rsrc_lock); 5348 return 0; 5349 } 5350 5351 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 5352 { 5353 int blkaddr, layer, prof_idx, err; 5354 struct nix_ipolicer *ipolicer; 5355 struct nix_hw *nix_hw; 5356 5357 if (!rvu->hw->cap.ipolicer) 5358 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5359 5360 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5361 if (err) 5362 return err; 5363 5364 mutex_lock(&rvu->rsrc_lock); 5365 /* Free all the profiles allocated to the PCIFUNC */ 5366 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5367 if (layer == BAND_PROF_INVAL_LAYER) 5368 continue; 5369 ipolicer = &nix_hw->ipolicer[layer]; 5370 5371 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 5372 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 5373 continue; 5374 5375 /* Clear ratelimit aggregation, if any */ 5376 if (layer == BAND_PROF_LEAF_LAYER && 5377 ipolicer->match_id[prof_idx]) 5378 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5379 5380 ipolicer->pfvf_map[prof_idx] = 0x00; 5381 ipolicer->match_id[prof_idx] = 0; 5382 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5383 } 5384 } 5385 mutex_unlock(&rvu->rsrc_lock); 5386 return 0; 5387 } 5388 5389 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 5390 struct nix_bandprof_free_req *req, 5391 struct msg_rsp *rsp) 5392 { 5393 int blkaddr, layer, prof_idx, idx, err; 5394 u16 pcifunc = req->hdr.pcifunc; 5395 struct nix_ipolicer *ipolicer; 5396 struct nix_hw *nix_hw; 5397 5398 if (req->free_all) 5399 return nix_free_all_bandprof(rvu, pcifunc); 5400 5401 if (!rvu->hw->cap.ipolicer) 5402 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5403 5404 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5405 if (err) 5406 return err; 5407 5408 mutex_lock(&rvu->rsrc_lock); 5409 /* Free the requested profile indices */ 5410 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5411 if (layer == BAND_PROF_INVAL_LAYER) 5412 continue; 5413 if (!req->prof_count[layer]) 5414 
continue; 5415 5416 ipolicer = &nix_hw->ipolicer[layer]; 5417 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5418 if (idx == MAX_BANDPROF_PER_PFFUNC) 5419 break; 5420 prof_idx = req->prof_idx[layer][idx]; 5421 if (prof_idx >= ipolicer->band_prof.max || 5422 ipolicer->pfvf_map[prof_idx] != pcifunc) 5423 continue; 5424 5425 /* Clear ratelimit aggregation, if any */ 5426 if (layer == BAND_PROF_LEAF_LAYER && 5427 ipolicer->match_id[prof_idx]) 5428 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5429 5430 ipolicer->pfvf_map[prof_idx] = 0x00; 5431 ipolicer->match_id[prof_idx] = 0; 5432 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5433 } 5434 } 5435 mutex_unlock(&rvu->rsrc_lock); 5436 return 0; 5437 } 5438 5439 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 5440 struct nix_cn10k_aq_enq_req *aq_req, 5441 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5442 u16 pcifunc, u8 ctype, u32 qidx) 5443 { 5444 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5445 aq_req->hdr.pcifunc = pcifunc; 5446 aq_req->ctype = ctype; 5447 aq_req->op = NIX_AQ_INSTOP_READ; 5448 aq_req->qidx = qidx; 5449 5450 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5451 (struct nix_aq_enq_req *)aq_req, 5452 (struct nix_aq_enq_rsp *)aq_rsp); 5453 } 5454 5455 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 5456 struct nix_hw *nix_hw, 5457 struct nix_cn10k_aq_enq_req *aq_req, 5458 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5459 u32 leaf_prof, u16 mid_prof) 5460 { 5461 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5462 aq_req->hdr.pcifunc = 0x00; 5463 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 5464 aq_req->op = NIX_AQ_INSTOP_WRITE; 5465 aq_req->qidx = leaf_prof; 5466 5467 aq_req->prof.band_prof_id = mid_prof; 5468 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 5469 aq_req->prof.hl_en = 1; 5470 aq_req->prof_mask.hl_en = 1; 5471 5472 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5473 (struct nix_aq_enq_req *)aq_req, 5474 (struct nix_aq_enq_rsp *)aq_rsp); 5475 } 5476 5477 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 5478 u16 rq_idx, u16 match_id) 5479 { 5480 int leaf_prof, mid_prof, leaf_match; 5481 struct nix_cn10k_aq_enq_req aq_req; 5482 struct nix_cn10k_aq_enq_rsp aq_rsp; 5483 struct nix_ipolicer *ipolicer; 5484 struct nix_hw *nix_hw; 5485 int blkaddr, idx, rc; 5486 5487 if (!rvu->hw->cap.ipolicer) 5488 return 0; 5489 5490 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5491 if (rc) 5492 return rc; 5493 5494 /* Fetch the RQ's context to see if policing is enabled */ 5495 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 5496 NIX_AQ_CTYPE_RQ, rq_idx); 5497 if (rc) { 5498 dev_err(rvu->dev, 5499 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 5500 __func__, rq_idx, pcifunc); 5501 return rc; 5502 } 5503 5504 if (!aq_rsp.rq.policer_ena) 5505 return 0; 5506 5507 /* Get the bandwidth profile ID mapped to this RQ */ 5508 leaf_prof = aq_rsp.rq.band_prof_id; 5509 5510 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 5511 ipolicer->match_id[leaf_prof] = match_id; 5512 5513 /* Check if any other leaf profile is marked with same match_id */ 5514 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 5515 if (idx == leaf_prof) 5516 continue; 5517 if (ipolicer->match_id[idx] != match_id) 5518 continue; 5519 5520 leaf_match = idx; 5521 break; 5522 } 5523 5524 if (idx == ipolicer->band_prof.max) 5525 return 0; 5526 5527 /* Fetch the matching profile's context to check if it's already 5528 * mapped to a mid level profile. 
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map the leaf_prof
		 * index to it, so that flows steered to different RQs but
		 * marked with the same match_id are rate limited in an
		 * aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with mutex rsrc_lock */
static void
nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 5644 u32 leaf_prof) 5645 { 5646 struct nix_cn10k_aq_enq_req aq_req; 5647 struct nix_cn10k_aq_enq_rsp aq_rsp; 5648 struct nix_ipolicer *ipolicer; 5649 u16 mid_prof; 5650 int rc; 5651 5652 mutex_unlock(&rvu->rsrc_lock); 5653 5654 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5655 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5656 5657 mutex_lock(&rvu->rsrc_lock); 5658 if (rc) { 5659 dev_err(rvu->dev, 5660 "%s: Failed to fetch context of leaf profile %d\n", 5661 __func__, leaf_prof); 5662 return; 5663 } 5664 5665 if (!aq_rsp.prof.hl_en) 5666 return; 5667 5668 mid_prof = aq_rsp.prof.band_prof_id; 5669 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5670 ipolicer->ref_count[mid_prof]--; 5671 /* If ref_count is zero, free mid layer profile */ 5672 if (!ipolicer->ref_count[mid_prof]) { 5673 ipolicer->pfvf_map[mid_prof] = 0x00; 5674 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 5675 } 5676 } 5677 5678 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 5679 struct nix_bandprof_get_hwinfo_rsp *rsp) 5680 { 5681 struct nix_ipolicer *ipolicer; 5682 int blkaddr, layer, err; 5683 struct nix_hw *nix_hw; 5684 u64 tu; 5685 5686 if (!rvu->hw->cap.ipolicer) 5687 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5688 5689 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 5690 if (err) 5691 return err; 5692 5693 /* Return number of bandwidth profiles free at each layer */ 5694 mutex_lock(&rvu->rsrc_lock); 5695 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5696 if (layer == BAND_PROF_INVAL_LAYER) 5697 continue; 5698 5699 ipolicer = &nix_hw->ipolicer[layer]; 5700 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 5701 } 5702 mutex_unlock(&rvu->rsrc_lock); 5703 5704 /* Set the policer timeunit in nanosec */ 5705 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 5706 rsp->policer_timeunit = (tu + 1) * 100; 5707 5708 return 0; 5709 } 5710
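/* Worked example of the policer timeunit math used above: NIX_AF_PL_TS
 * holds (timeunit / 100 ns) - 1 in its low 10 bits. nix_setup_ipolicers()
 * writes 19, so rvu_mbox_handler_nix_bandprof_get_hwinfo() reports
 * (19 + 1) * 100 = 2000 ns, i.e. the 2 us mentioned in the setup comment.
 * The inverse, for some other timeunit T in nanoseconds (a multiple of 100,
 * shown only as an illustrative sketch):
 *
 *	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, (T / 100) - 1);
 */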