1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 11 #include "rvu_struct.h" 12 #include "rvu_reg.h" 13 #include "rvu.h" 14 #include "npc.h" 15 #include "cgx.h" 16 #include "lmac_common.h" 17 18 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 19 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 20 int type, int chan_id); 21 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 22 int type, bool add); 23 static int nix_setup_ipolicers(struct rvu *rvu, 24 struct nix_hw *nix_hw, int blkaddr); 25 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); 26 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 27 struct nix_hw *nix_hw, u16 pcifunc); 28 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 29 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 30 u32 leaf_prof); 31 32 enum mc_tbl_sz { 33 MC_TBL_SZ_256, 34 MC_TBL_SZ_512, 35 MC_TBL_SZ_1K, 36 MC_TBL_SZ_2K, 37 MC_TBL_SZ_4K, 38 MC_TBL_SZ_8K, 39 MC_TBL_SZ_16K, 40 MC_TBL_SZ_32K, 41 MC_TBL_SZ_64K, 42 }; 43 44 enum mc_buf_cnt { 45 MC_BUF_CNT_8, 46 MC_BUF_CNT_16, 47 MC_BUF_CNT_32, 48 MC_BUF_CNT_64, 49 MC_BUF_CNT_128, 50 MC_BUF_CNT_256, 51 MC_BUF_CNT_512, 52 MC_BUF_CNT_1024, 53 MC_BUF_CNT_2048, 54 }; 55 56 enum nix_makr_fmt_indexes { 57 NIX_MARK_CFG_IP_DSCP_RED, 58 NIX_MARK_CFG_IP_DSCP_YELLOW, 59 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 60 NIX_MARK_CFG_IP_ECN_RED, 61 NIX_MARK_CFG_IP_ECN_YELLOW, 62 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 63 NIX_MARK_CFG_VLAN_DEI_RED, 64 NIX_MARK_CFG_VLAN_DEI_YELLOW, 65 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 66 NIX_MARK_CFG_MAX, 67 }; 68 69 /* For now considering MC resources needed for broadcast 70 * pkt replication only. i.e 256 HWVFs + 12 PFs. 
71 */ 72 #define MC_TBL_SIZE MC_TBL_SZ_512 73 #define MC_BUF_CNT MC_BUF_CNT_128 74 75 struct mce { 76 struct hlist_node node; 77 u16 pcifunc; 78 }; 79 80 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 81 { 82 int i = 0; 83 84 /*If blkaddr is 0, return the first nix block address*/ 85 if (blkaddr == 0) 86 return rvu->nix_blkaddr[blkaddr]; 87 88 while (i + 1 < MAX_NIX_BLKS) { 89 if (rvu->nix_blkaddr[i] == blkaddr) 90 return rvu->nix_blkaddr[i + 1]; 91 i++; 92 } 93 94 return 0; 95 } 96 97 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 98 { 99 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 100 int blkaddr; 101 102 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 103 if (!pfvf->nixlf || blkaddr < 0) 104 return false; 105 return true; 106 } 107 108 int rvu_get_nixlf_count(struct rvu *rvu) 109 { 110 int blkaddr = 0, max = 0; 111 struct rvu_block *block; 112 113 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 114 while (blkaddr) { 115 block = &rvu->hw->block[blkaddr]; 116 max += block->lf.max; 117 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 118 } 119 return max; 120 } 121 122 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 123 { 124 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 125 struct rvu_hwinfo *hw = rvu->hw; 126 int blkaddr; 127 128 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 129 if (!pfvf->nixlf || blkaddr < 0) 130 return NIX_AF_ERR_AF_LF_INVALID; 131 132 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 133 if (*nixlf < 0) 134 return NIX_AF_ERR_AF_LF_INVALID; 135 136 if (nix_blkaddr) 137 *nix_blkaddr = blkaddr; 138 139 return 0; 140 } 141 142 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 143 struct nix_hw **nix_hw, int *blkaddr) 144 { 145 struct rvu_pfvf *pfvf; 146 147 pfvf = rvu_get_pfvf(rvu, pcifunc); 148 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 149 if (!pfvf->nixlf || *blkaddr < 0) 150 return NIX_AF_ERR_AF_LF_INVALID; 151 152 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 153 if (!*nix_hw) 154 return NIX_AF_ERR_INVALID_NIXBLK; 155 return 0; 156 } 157 158 static void nix_mce_list_init(struct nix_mce_list *list, int max) 159 { 160 INIT_HLIST_HEAD(&list->head); 161 list->count = 0; 162 list->max = max; 163 } 164 165 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 166 { 167 int idx; 168 169 if (!mcast) 170 return 0; 171 172 idx = mcast->next_free_mce; 173 mcast->next_free_mce += count; 174 return idx; 175 } 176 177 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 178 { 179 int nix_blkaddr = 0, i = 0; 180 struct rvu *rvu = hw->rvu; 181 182 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 183 while (nix_blkaddr) { 184 if (blkaddr == nix_blkaddr && hw->nix) 185 return &hw->nix[i]; 186 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 187 i++; 188 } 189 return NULL; 190 } 191 192 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) 193 { 194 dwrr_mtu &= 0x1FULL; 195 196 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 197 * Value of 4 is reserved for MTU value of 9728 bytes. 198 * Value of 5 is reserved for MTU value of 10240 bytes. 199 */ 200 switch (dwrr_mtu) { 201 case 4: 202 return 9728; 203 case 5: 204 return 10240; 205 default: 206 return BIT_ULL(dwrr_mtu); 207 } 208 209 return 0; 210 } 211 212 u32 convert_bytes_to_dwrr_mtu(u32 bytes) 213 { 214 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 215 * Value of 4 is reserved for MTU value of 9728 bytes. 
216 * Value of 5 is reserved for MTU value of 10240 bytes. 217 */ 218 if (bytes > BIT_ULL(16)) 219 return 0; 220 221 switch (bytes) { 222 case 9728: 223 return 4; 224 case 10240: 225 return 5; 226 default: 227 return ilog2(bytes); 228 } 229 230 return 0; 231 } 232 233 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 234 { 235 int err; 236 237 /* Sync all in flight RX packets to LLC/DRAM */ 238 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 239 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 240 if (err) 241 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); 242 243 /* SW_SYNC ensures all existing transactions are finished and pkts 244 * are written to LLC/DRAM, queues should be teared down after 245 * successful SW_SYNC. Due to a HW errata, in some rare scenarios 246 * an existing transaction might end after SW_SYNC operation. To 247 * ensure operation is fully done, do the SW_SYNC twice. 248 */ 249 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 250 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 251 if (err) 252 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); 253 } 254 255 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 256 int lvl, u16 pcifunc, u16 schq) 257 { 258 struct rvu_hwinfo *hw = rvu->hw; 259 struct nix_txsch *txsch; 260 struct nix_hw *nix_hw; 261 u16 map_func; 262 263 nix_hw = get_nix_hw(rvu->hw, blkaddr); 264 if (!nix_hw) 265 return false; 266 267 txsch = &nix_hw->txsch[lvl]; 268 /* Check out of bounds */ 269 if (schq >= txsch->schq.max) 270 return false; 271 272 mutex_lock(&rvu->rsrc_lock); 273 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 274 mutex_unlock(&rvu->rsrc_lock); 275 276 /* TLs aggegating traffic are shared across PF and VFs */ 277 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 278 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 279 return false; 280 else 281 return true; 282 } 283 284 if (map_func != pcifunc) 285 return false; 286 287 return true; 288 } 289 290 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, 291 struct nix_lf_alloc_rsp *rsp, bool loop) 292 { 293 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); 294 u16 req_chan_base, req_chan_end, req_chan_cnt; 295 struct rvu_hwinfo *hw = rvu->hw; 296 struct sdp_node_info *sdp_info; 297 int pkind, pf, vf, lbkid, vfid; 298 struct mac_ops *mac_ops; 299 u8 cgx_id, lmac_id; 300 bool from_vf; 301 int err; 302 303 pf = rvu_get_pf(pcifunc); 304 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 305 type != NIX_INTF_TYPE_SDP) 306 return 0; 307 308 switch (type) { 309 case NIX_INTF_TYPE_CGX: 310 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 311 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 312 313 pkind = rvu_npc_get_pkind(rvu, pf); 314 if (pkind < 0) { 315 dev_err(rvu->dev, 316 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 317 return -EINVAL; 318 } 319 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 320 pfvf->tx_chan_base = pfvf->rx_chan_base; 321 pfvf->rx_chan_cnt = 1; 322 pfvf->tx_chan_cnt = 1; 323 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; 324 325 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 326 rvu_npc_set_pkind(rvu, pkind, pfvf); 327 328 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); 329 330 /* By default we enable pause frames */ 331 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) 332 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, 333 rvu), 334 lmac_id, true, true); 335 break; 336 case NIX_INTF_TYPE_LBK: 337 vf = 
(pcifunc & RVU_PFVF_FUNC_MASK) - 1; 338 339 /* If NIX1 block is present on the silicon then NIXes are 340 * assigned alternatively for lbk interfaces. NIX0 should 341 * send packets on lbk link 1 channels and NIX1 should send 342 * on lbk link 0 channels for the communication between 343 * NIX0 and NIX1. 344 */ 345 lbkid = 0; 346 if (rvu->hw->lbk_links > 1) 347 lbkid = vf & 0x1 ? 0 : 1; 348 349 /* By default NIX0 is configured to send packet on lbk link 1 350 * (which corresponds to LBK1), same packet will receive on 351 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 352 * (which corresponds to LBK2) packet will receive on NIX0 lbk 353 * link 1. 354 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 355 * transmits and receives on lbk link 0, whick corresponds 356 * to LBK1 block, back to back connectivity between NIX and 357 * LBK can be achieved (which is similar to 96xx) 358 * 359 * RX TX 360 * NIX0 lbk link 1 (LBK2) 1 (LBK1) 361 * NIX0 lbk link 0 (LBK0) 0 (LBK0) 362 * NIX1 lbk link 0 (LBK1) 0 (LBK2) 363 * NIX1 lbk link 1 (LBK3) 1 (LBK3) 364 */ 365 if (loop) 366 lbkid = !lbkid; 367 368 /* Note that AF's VFs work in pairs and talk over consecutive 369 * loopback channels.Therefore if odd number of AF VFs are 370 * enabled then the last VF remains with no pair. 371 */ 372 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 373 pfvf->tx_chan_base = vf & 0x1 ? 374 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 375 rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 376 pfvf->rx_chan_cnt = 1; 377 pfvf->tx_chan_cnt = 1; 378 rsp->tx_link = hw->cgx_links + lbkid; 379 pfvf->lbkid = lbkid; 380 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); 381 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 382 pfvf->rx_chan_base, 383 pfvf->rx_chan_cnt); 384 385 break; 386 case NIX_INTF_TYPE_SDP: 387 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 388 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 389 sdp_info = parent_pf->sdp_info; 390 if (!sdp_info) { 391 dev_err(rvu->dev, "Invalid sdp_info pointer\n"); 392 return -EINVAL; 393 } 394 if (from_vf) { 395 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + 396 sdp_info->num_pf_rings; 397 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 398 for (vfid = 0; vfid < vf; vfid++) 399 req_chan_base += sdp_info->vf_rings[vfid]; 400 req_chan_cnt = sdp_info->vf_rings[vf]; 401 req_chan_end = req_chan_base + req_chan_cnt - 1; 402 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || 403 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { 404 dev_err(rvu->dev, 405 "PF_Func 0x%x: Invalid channel base and count\n", 406 pcifunc); 407 return -EINVAL; 408 } 409 } else { 410 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; 411 req_chan_cnt = sdp_info->num_pf_rings; 412 } 413 414 pfvf->rx_chan_base = req_chan_base; 415 pfvf->rx_chan_cnt = req_chan_cnt; 416 pfvf->tx_chan_base = pfvf->rx_chan_base; 417 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; 418 419 rsp->tx_link = hw->cgx_links + hw->lbk_links; 420 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 421 pfvf->rx_chan_base, 422 pfvf->rx_chan_cnt); 423 break; 424 } 425 426 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 427 * RVU PF/VF's MAC address. 
428 */ 429 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 430 pfvf->rx_chan_base, pfvf->mac_addr); 431 432 /* Add this PF_FUNC to bcast pkt replication list */ 433 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 434 if (err) { 435 dev_err(rvu->dev, 436 "Bcast list, failed to enable PF_FUNC 0x%x\n", 437 pcifunc); 438 return err; 439 } 440 /* Install MCAM rule matching Ethernet broadcast mac address */ 441 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 442 nixlf, pfvf->rx_chan_base); 443 444 pfvf->maxlen = NIC_HW_MIN_FRS; 445 pfvf->minlen = NIC_HW_MIN_FRS; 446 447 return 0; 448 } 449 450 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 451 { 452 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 453 int err; 454 455 pfvf->maxlen = 0; 456 pfvf->minlen = 0; 457 458 /* Remove this PF_FUNC from bcast pkt replication list */ 459 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 460 if (err) { 461 dev_err(rvu->dev, 462 "Bcast list, failed to disable PF_FUNC 0x%x\n", 463 pcifunc); 464 } 465 466 /* Free and disable any MCAM entries used by this NIX LF */ 467 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 468 469 /* Disable DMAC filters used */ 470 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 471 } 472 473 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 474 struct nix_bp_cfg_req *req, 475 struct msg_rsp *rsp) 476 { 477 u16 pcifunc = req->hdr.pcifunc; 478 struct rvu_pfvf *pfvf; 479 int blkaddr, pf, type; 480 u16 chan_base, chan; 481 u64 cfg; 482 483 pf = rvu_get_pf(pcifunc); 484 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 485 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 486 return 0; 487 488 pfvf = rvu_get_pfvf(rvu, pcifunc); 489 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 490 491 chan_base = pfvf->rx_chan_base + req->chan_base; 492 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 493 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 494 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 495 cfg & ~BIT_ULL(16)); 496 } 497 return 0; 498 } 499 500 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 501 int type, int chan_id) 502 { 503 int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; 504 u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; 505 struct rvu_hwinfo *hw = rvu->hw; 506 struct rvu_pfvf *pfvf; 507 u8 cgx_id, lmac_id; 508 u64 cfg; 509 510 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 511 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 512 lmac_chan_cnt = cfg & 0xFF; 513 514 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 515 sdp_chan_cnt = cfg & 0xFFF; 516 517 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 518 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 519 sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; 520 521 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 522 523 /* Backpressure IDs range division 524 * CGX channles are mapped to (0 - 191) BPIDs 525 * LBK channles are mapped to (192 - 255) BPIDs 526 * SDP channles are mapped to (256 - 511) BPIDs 527 * 528 * Lmac channles and bpids mapped as follows 529 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 530 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 531 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 
532 */ 533 switch (type) { 534 case NIX_INTF_TYPE_CGX: 535 if ((req->chan_base + req->chan_cnt) > 15) 536 return -EINVAL; 537 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 538 /* Assign bpid based on cgx, lmac and chan id */ 539 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 540 (lmac_id * lmac_chan_cnt) + req->chan_base; 541 542 if (req->bpid_per_chan) 543 bpid += chan_id; 544 if (bpid > cgx_bpid_cnt) 545 return -EINVAL; 546 break; 547 548 case NIX_INTF_TYPE_LBK: 549 if ((req->chan_base + req->chan_cnt) > 63) 550 return -EINVAL; 551 bpid = cgx_bpid_cnt + req->chan_base; 552 if (req->bpid_per_chan) 553 bpid += chan_id; 554 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 555 return -EINVAL; 556 break; 557 case NIX_INTF_TYPE_SDP: 558 if ((req->chan_base + req->chan_cnt) > 255) 559 return -EINVAL; 560 561 bpid = sdp_bpid_cnt + req->chan_base; 562 if (req->bpid_per_chan) 563 bpid += chan_id; 564 565 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) 566 return -EINVAL; 567 break; 568 default: 569 return -EINVAL; 570 } 571 return bpid; 572 } 573 574 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 575 struct nix_bp_cfg_req *req, 576 struct nix_bp_cfg_rsp *rsp) 577 { 578 int blkaddr, pf, type, chan_id = 0; 579 u16 pcifunc = req->hdr.pcifunc; 580 struct rvu_pfvf *pfvf; 581 u16 chan_base, chan; 582 s16 bpid, bpid_base; 583 u64 cfg; 584 585 pf = rvu_get_pf(pcifunc); 586 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 587 if (is_sdp_pfvf(pcifunc)) 588 type = NIX_INTF_TYPE_SDP; 589 590 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ 591 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 592 type != NIX_INTF_TYPE_SDP) 593 return 0; 594 595 pfvf = rvu_get_pfvf(rvu, pcifunc); 596 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 597 598 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 599 chan_base = pfvf->rx_chan_base + req->chan_base; 600 bpid = bpid_base; 601 602 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 603 if (bpid < 0) { 604 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 605 return -EINVAL; 606 } 607 608 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 609 cfg &= ~GENMASK_ULL(8, 0); 610 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 611 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); 612 chan_id++; 613 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 614 } 615 616 for (chan = 0; chan < req->chan_cnt; chan++) { 617 /* Map channel and bpid assign to it */ 618 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 619 (bpid_base & 0x3FF); 620 if (req->bpid_per_chan) 621 bpid_base++; 622 } 623 rsp->chan_cnt = req->chan_cnt; 624 625 return 0; 626 } 627 628 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 629 u64 format, bool v4, u64 *fidx) 630 { 631 struct nix_lso_format field = {0}; 632 633 /* IP's Length field */ 634 field.layer = NIX_TXLAYER_OL3; 635 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 636 field.offset = v4 ? 
2 : 4; 637 field.sizem1 = 1; /* i.e 2 bytes */ 638 field.alg = NIX_LSOALG_ADD_PAYLEN; 639 rvu_write64(rvu, blkaddr, 640 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 641 *(u64 *)&field); 642 643 /* No ID field in IPv6 header */ 644 if (!v4) 645 return; 646 647 /* IP's ID field */ 648 field.layer = NIX_TXLAYER_OL3; 649 field.offset = 4; 650 field.sizem1 = 1; /* i.e 2 bytes */ 651 field.alg = NIX_LSOALG_ADD_SEGNUM; 652 rvu_write64(rvu, blkaddr, 653 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 654 *(u64 *)&field); 655 } 656 657 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 658 u64 format, u64 *fidx) 659 { 660 struct nix_lso_format field = {0}; 661 662 /* TCP's sequence number field */ 663 field.layer = NIX_TXLAYER_OL4; 664 field.offset = 4; 665 field.sizem1 = 3; /* i.e 4 bytes */ 666 field.alg = NIX_LSOALG_ADD_OFFSET; 667 rvu_write64(rvu, blkaddr, 668 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 669 *(u64 *)&field); 670 671 /* TCP's flags field */ 672 field.layer = NIX_TXLAYER_OL4; 673 field.offset = 12; 674 field.sizem1 = 1; /* 2 bytes */ 675 field.alg = NIX_LSOALG_TCP_FLAGS; 676 rvu_write64(rvu, blkaddr, 677 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 678 *(u64 *)&field); 679 } 680 681 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 682 { 683 u64 cfg, idx, fidx = 0; 684 685 /* Get max HW supported format indices */ 686 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 687 nix_hw->lso.total = cfg; 688 689 /* Enable LSO */ 690 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 691 /* For TSO, set first and middle segment flags to 692 * mask out PSH, RST & FIN flags in TCP packet 693 */ 694 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 695 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 696 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 697 698 /* Setup default static LSO formats 699 * 700 * Configure format fields for TCPv4 segmentation offload 701 */ 702 idx = NIX_LSO_FORMAT_IDX_TSOV4; 703 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 704 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 705 706 /* Set rest of the fields to NOP */ 707 for (; fidx < 8; fidx++) { 708 rvu_write64(rvu, blkaddr, 709 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 710 } 711 nix_hw->lso.in_use++; 712 713 /* Configure format fields for TCPv6 segmentation offload */ 714 idx = NIX_LSO_FORMAT_IDX_TSOV6; 715 fidx = 0; 716 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 717 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 718 719 /* Set rest of the fields to NOP */ 720 for (; fidx < 8; fidx++) { 721 rvu_write64(rvu, blkaddr, 722 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 723 } 724 nix_hw->lso.in_use++; 725 } 726 727 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 728 { 729 kfree(pfvf->rq_bmap); 730 kfree(pfvf->sq_bmap); 731 kfree(pfvf->cq_bmap); 732 if (pfvf->rq_ctx) 733 qmem_free(rvu->dev, pfvf->rq_ctx); 734 if (pfvf->sq_ctx) 735 qmem_free(rvu->dev, pfvf->sq_ctx); 736 if (pfvf->cq_ctx) 737 qmem_free(rvu->dev, pfvf->cq_ctx); 738 if (pfvf->rss_ctx) 739 qmem_free(rvu->dev, pfvf->rss_ctx); 740 if (pfvf->nix_qints_ctx) 741 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 742 if (pfvf->cq_ints_ctx) 743 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 744 745 pfvf->rq_bmap = NULL; 746 pfvf->cq_bmap = NULL; 747 pfvf->sq_bmap = NULL; 748 pfvf->rq_ctx = NULL; 749 pfvf->sq_ctx = NULL; 750 pfvf->cq_ctx = NULL; 751 pfvf->rss_ctx = NULL; 752 pfvf->nix_qints_ctx = NULL; 753 pfvf->cq_ints_ctx = NULL; 754 } 755 756 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 757 struct rvu_pfvf *pfvf, int nixlf, 758 int rss_sz, int rss_grps, int hwctx_size, 759 u64 way_mask, bool tag_lsb_as_adder) 760 { 761 int err, grp, num_indices; 762 u64 val; 763 764 /* RSS is not requested for this NIXLF */ 765 if (!rss_sz) 766 return 0; 767 num_indices = rss_sz * rss_grps; 768 769 /* Alloc NIX RSS HW context memory and config the base */ 770 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 771 if (err) 772 return err; 773 774 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 775 (u64)pfvf->rss_ctx->iova); 776 777 /* Config full RSS table size, enable RSS and caching */ 778 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | 779 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); 780 781 if (tag_lsb_as_adder) 782 val |= BIT_ULL(5); 783 784 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); 785 /* Config RSS group offset and sizes */ 786 for (grp = 0; grp < rss_grps; grp++) 787 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 788 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 789 return 0; 790 } 791 792 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 793 struct nix_aq_inst_s *inst) 794 { 795 struct admin_queue *aq = block->aq; 796 struct nix_aq_res_s *result; 797 int timeout = 1000; 798 u64 reg, head; 799 800 result = (struct nix_aq_res_s *)aq->res->base; 801 802 /* Get current head pointer where to append this instruction */ 803 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 804 head = (reg >> 4) & AQ_PTR_MASK; 805 806 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 807 (void *)inst, aq->inst->entry_sz); 808 memset(result, 0, sizeof(*result)); 809 /* sync into memory */ 810 wmb(); 811 812 /* Ring the doorbell and wait for result */ 813 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 814 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 815 cpu_relax(); 816 udelay(1); 817 timeout--; 818 if (!timeout) 819 return -EBUSY; 820 } 821 822 if (result->compcode != NIX_AQ_COMP_GOOD) 823 /* TODO: Replace this with some error code */ 824 return -EBUSY; 825 826 return 0; 827 } 828 829 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 830 struct nix_aq_enq_req *req, 831 struct nix_aq_enq_rsp *rsp) 832 { 833 struct rvu_hwinfo *hw = rvu->hw; 834 u16 pcifunc = req->hdr.pcifunc; 835 int nixlf, blkaddr, rc = 0; 836 struct nix_aq_inst_s inst; 837 struct rvu_block *block; 838 struct admin_queue *aq; 839 struct rvu_pfvf *pfvf; 840 void *ctx, *mask; 841 bool ena; 842 u64 cfg; 843 844 blkaddr = nix_hw->blkaddr; 845 block = &hw->block[blkaddr]; 846 aq = block->aq; 847 if (!aq) { 848 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 849 return NIX_AF_ERR_AQ_ENQUEUE; 850 } 851 852 pfvf = rvu_get_pfvf(rvu, pcifunc); 853 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 854 855 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 856 * operations done by AF itself. 
857 */ 858 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 859 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 860 if (!pfvf->nixlf || nixlf < 0) 861 return NIX_AF_ERR_AF_LF_INVALID; 862 } 863 864 switch (req->ctype) { 865 case NIX_AQ_CTYPE_RQ: 866 /* Check if index exceeds max no of queues */ 867 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 868 rc = NIX_AF_ERR_AQ_ENQUEUE; 869 break; 870 case NIX_AQ_CTYPE_SQ: 871 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 872 rc = NIX_AF_ERR_AQ_ENQUEUE; 873 break; 874 case NIX_AQ_CTYPE_CQ: 875 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 876 rc = NIX_AF_ERR_AQ_ENQUEUE; 877 break; 878 case NIX_AQ_CTYPE_RSS: 879 /* Check if RSS is enabled and qidx is within range */ 880 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 881 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 882 (req->qidx >= (256UL << (cfg & 0xF)))) 883 rc = NIX_AF_ERR_AQ_ENQUEUE; 884 break; 885 case NIX_AQ_CTYPE_MCE: 886 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 887 888 /* Check if index exceeds MCE list length */ 889 if (!nix_hw->mcast.mce_ctx || 890 (req->qidx >= (256UL << (cfg & 0xF)))) 891 rc = NIX_AF_ERR_AQ_ENQUEUE; 892 893 /* Adding multicast lists for requests from PF/VFs is not 894 * yet supported, so ignore this. 895 */ 896 if (rsp) 897 rc = NIX_AF_ERR_AQ_ENQUEUE; 898 break; 899 case NIX_AQ_CTYPE_BANDPROF: 900 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 901 nix_hw, pcifunc)) 902 rc = NIX_AF_ERR_INVALID_BANDPROF; 903 break; 904 default: 905 rc = NIX_AF_ERR_AQ_ENQUEUE; 906 } 907 908 if (rc) 909 return rc; 910 911 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 912 if (req->ctype == NIX_AQ_CTYPE_SQ && 913 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 914 (req->op == NIX_AQ_INSTOP_WRITE && 915 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 916 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 917 pcifunc, req->sq.smq)) 918 return NIX_AF_ERR_AQ_ENQUEUE; 919 } 920 921 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 922 inst.lf = nixlf; 923 inst.cindex = req->qidx; 924 inst.ctype = req->ctype; 925 inst.op = req->op; 926 /* Currently we are not supporting enqueuing multiple instructions, 927 * so always choose first entry in result memory. 928 */ 929 inst.res_addr = (u64)aq->res->iova; 930 931 /* Hardware uses same aq->res->base for updating result of 932 * previous instruction hence wait here till it is done. 
933 */ 934 spin_lock(&aq->lock); 935 936 /* Clean result + context memory */ 937 memset(aq->res->base, 0, aq->res->entry_sz); 938 /* Context needs to be written at RES_ADDR + 128 */ 939 ctx = aq->res->base + 128; 940 /* Mask needs to be written at RES_ADDR + 256 */ 941 mask = aq->res->base + 256; 942 943 switch (req->op) { 944 case NIX_AQ_INSTOP_WRITE: 945 if (req->ctype == NIX_AQ_CTYPE_RQ) 946 memcpy(mask, &req->rq_mask, 947 sizeof(struct nix_rq_ctx_s)); 948 else if (req->ctype == NIX_AQ_CTYPE_SQ) 949 memcpy(mask, &req->sq_mask, 950 sizeof(struct nix_sq_ctx_s)); 951 else if (req->ctype == NIX_AQ_CTYPE_CQ) 952 memcpy(mask, &req->cq_mask, 953 sizeof(struct nix_cq_ctx_s)); 954 else if (req->ctype == NIX_AQ_CTYPE_RSS) 955 memcpy(mask, &req->rss_mask, 956 sizeof(struct nix_rsse_s)); 957 else if (req->ctype == NIX_AQ_CTYPE_MCE) 958 memcpy(mask, &req->mce_mask, 959 sizeof(struct nix_rx_mce_s)); 960 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 961 memcpy(mask, &req->prof_mask, 962 sizeof(struct nix_bandprof_s)); 963 fallthrough; 964 case NIX_AQ_INSTOP_INIT: 965 if (req->ctype == NIX_AQ_CTYPE_RQ) 966 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 967 else if (req->ctype == NIX_AQ_CTYPE_SQ) 968 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 969 else if (req->ctype == NIX_AQ_CTYPE_CQ) 970 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 971 else if (req->ctype == NIX_AQ_CTYPE_RSS) 972 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 973 else if (req->ctype == NIX_AQ_CTYPE_MCE) 974 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 975 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 976 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 977 break; 978 case NIX_AQ_INSTOP_NOP: 979 case NIX_AQ_INSTOP_READ: 980 case NIX_AQ_INSTOP_LOCK: 981 case NIX_AQ_INSTOP_UNLOCK: 982 break; 983 default: 984 rc = NIX_AF_ERR_AQ_ENQUEUE; 985 spin_unlock(&aq->lock); 986 return rc; 987 } 988 989 /* Submit the instruction to AQ */ 990 rc = nix_aq_enqueue_wait(rvu, block, &inst); 991 if (rc) { 992 spin_unlock(&aq->lock); 993 return rc; 994 } 995 996 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 997 if (req->op == NIX_AQ_INSTOP_INIT) { 998 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 999 __set_bit(req->qidx, pfvf->rq_bmap); 1000 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 1001 __set_bit(req->qidx, pfvf->sq_bmap); 1002 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 1003 __set_bit(req->qidx, pfvf->cq_bmap); 1004 } 1005 1006 if (req->op == NIX_AQ_INSTOP_WRITE) { 1007 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1008 ena = (req->rq.ena & req->rq_mask.ena) | 1009 (test_bit(req->qidx, pfvf->rq_bmap) & 1010 ~req->rq_mask.ena); 1011 if (ena) 1012 __set_bit(req->qidx, pfvf->rq_bmap); 1013 else 1014 __clear_bit(req->qidx, pfvf->rq_bmap); 1015 } 1016 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1017 ena = (req->rq.ena & req->sq_mask.ena) | 1018 (test_bit(req->qidx, pfvf->sq_bmap) & 1019 ~req->sq_mask.ena); 1020 if (ena) 1021 __set_bit(req->qidx, pfvf->sq_bmap); 1022 else 1023 __clear_bit(req->qidx, pfvf->sq_bmap); 1024 } 1025 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1026 ena = (req->rq.ena & req->cq_mask.ena) | 1027 (test_bit(req->qidx, pfvf->cq_bmap) & 1028 ~req->cq_mask.ena); 1029 if (ena) 1030 __set_bit(req->qidx, pfvf->cq_bmap); 1031 else 1032 __clear_bit(req->qidx, pfvf->cq_bmap); 1033 } 1034 } 1035 1036 if (rsp) { 1037 /* Copy read context into mailbox */ 1038 if (req->op == NIX_AQ_INSTOP_READ) { 1039 if (req->ctype == NIX_AQ_CTYPE_RQ) 1040 memcpy(&rsp->rq, ctx, 1041 sizeof(struct 
nix_rq_ctx_s)); 1042 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1043 memcpy(&rsp->sq, ctx, 1044 sizeof(struct nix_sq_ctx_s)); 1045 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1046 memcpy(&rsp->cq, ctx, 1047 sizeof(struct nix_cq_ctx_s)); 1048 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1049 memcpy(&rsp->rss, ctx, 1050 sizeof(struct nix_rsse_s)); 1051 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1052 memcpy(&rsp->mce, ctx, 1053 sizeof(struct nix_rx_mce_s)); 1054 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1055 memcpy(&rsp->prof, ctx, 1056 sizeof(struct nix_bandprof_s)); 1057 } 1058 } 1059 1060 spin_unlock(&aq->lock); 1061 return 0; 1062 } 1063 1064 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1065 struct nix_aq_enq_rsp *rsp) 1066 { 1067 struct nix_hw *nix_hw; 1068 int blkaddr; 1069 1070 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 1071 if (blkaddr < 0) 1072 return NIX_AF_ERR_AF_LF_INVALID; 1073 1074 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1075 if (!nix_hw) 1076 return NIX_AF_ERR_INVALID_NIXBLK; 1077 1078 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 1079 } 1080 1081 static const char *nix_get_ctx_name(int ctype) 1082 { 1083 switch (ctype) { 1084 case NIX_AQ_CTYPE_CQ: 1085 return "CQ"; 1086 case NIX_AQ_CTYPE_SQ: 1087 return "SQ"; 1088 case NIX_AQ_CTYPE_RQ: 1089 return "RQ"; 1090 case NIX_AQ_CTYPE_RSS: 1091 return "RSS"; 1092 } 1093 return ""; 1094 } 1095 1096 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 1097 { 1098 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1099 struct nix_aq_enq_req aq_req; 1100 unsigned long *bmap; 1101 int qidx, q_cnt = 0; 1102 int err = 0, rc; 1103 1104 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 1105 return NIX_AF_ERR_AQ_ENQUEUE; 1106 1107 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1108 aq_req.hdr.pcifunc = req->hdr.pcifunc; 1109 1110 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1111 aq_req.cq.ena = 0; 1112 aq_req.cq_mask.ena = 1; 1113 aq_req.cq.bp_ena = 0; 1114 aq_req.cq_mask.bp_ena = 1; 1115 q_cnt = pfvf->cq_ctx->qsize; 1116 bmap = pfvf->cq_bmap; 1117 } 1118 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1119 aq_req.sq.ena = 0; 1120 aq_req.sq_mask.ena = 1; 1121 q_cnt = pfvf->sq_ctx->qsize; 1122 bmap = pfvf->sq_bmap; 1123 } 1124 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1125 aq_req.rq.ena = 0; 1126 aq_req.rq_mask.ena = 1; 1127 q_cnt = pfvf->rq_ctx->qsize; 1128 bmap = pfvf->rq_bmap; 1129 } 1130 1131 aq_req.ctype = req->ctype; 1132 aq_req.op = NIX_AQ_INSTOP_WRITE; 1133 1134 for (qidx = 0; qidx < q_cnt; qidx++) { 1135 if (!test_bit(qidx, bmap)) 1136 continue; 1137 aq_req.qidx = qidx; 1138 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 1139 if (rc) { 1140 err = rc; 1141 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1142 nix_get_ctx_name(req->ctype), qidx); 1143 } 1144 } 1145 1146 return err; 1147 } 1148 1149 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1150 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1151 { 1152 struct nix_aq_enq_req lock_ctx_req; 1153 int err; 1154 1155 if (req->op != NIX_AQ_INSTOP_INIT) 1156 return 0; 1157 1158 if (req->ctype == NIX_AQ_CTYPE_MCE || 1159 req->ctype == NIX_AQ_CTYPE_DYNO) 1160 return 0; 1161 1162 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1163 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1164 lock_ctx_req.ctype = req->ctype; 1165 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1166 lock_ctx_req.qidx = req->qidx; 1167 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1168 if (err) 1169 dev_err(rvu->dev, 1170 
"PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1171 req->hdr.pcifunc, 1172 nix_get_ctx_name(req->ctype), req->qidx); 1173 return err; 1174 } 1175 1176 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1177 struct nix_aq_enq_req *req, 1178 struct nix_aq_enq_rsp *rsp) 1179 { 1180 int err; 1181 1182 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1183 if (!err) 1184 err = nix_lf_hwctx_lockdown(rvu, req); 1185 return err; 1186 } 1187 #else 1188 1189 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1190 struct nix_aq_enq_req *req, 1191 struct nix_aq_enq_rsp *rsp) 1192 { 1193 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1194 } 1195 #endif 1196 /* CN10K mbox handler */ 1197 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1198 struct nix_cn10k_aq_enq_req *req, 1199 struct nix_cn10k_aq_enq_rsp *rsp) 1200 { 1201 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1202 (struct nix_aq_enq_rsp *)rsp); 1203 } 1204 1205 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1206 struct hwctx_disable_req *req, 1207 struct msg_rsp *rsp) 1208 { 1209 return nix_lf_hwctx_disable(rvu, req); 1210 } 1211 1212 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1213 struct nix_lf_alloc_req *req, 1214 struct nix_lf_alloc_rsp *rsp) 1215 { 1216 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1217 struct rvu_hwinfo *hw = rvu->hw; 1218 u16 pcifunc = req->hdr.pcifunc; 1219 struct rvu_block *block; 1220 struct rvu_pfvf *pfvf; 1221 u64 cfg, ctx_cfg; 1222 int blkaddr; 1223 1224 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1225 return NIX_AF_ERR_PARAM; 1226 1227 if (req->way_mask) 1228 req->way_mask &= 0xFFFF; 1229 1230 pfvf = rvu_get_pfvf(rvu, pcifunc); 1231 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1232 if (!pfvf->nixlf || blkaddr < 0) 1233 return NIX_AF_ERR_AF_LF_INVALID; 1234 1235 block = &hw->block[blkaddr]; 1236 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1237 if (nixlf < 0) 1238 return NIX_AF_ERR_AF_LF_INVALID; 1239 1240 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1241 if (req->npa_func) { 1242 /* If default, use 'this' NIXLF's PFFUNC */ 1243 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1244 req->npa_func = pcifunc; 1245 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1246 return NIX_AF_INVAL_NPA_PF_FUNC; 1247 } 1248 1249 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1250 if (req->sso_func) { 1251 /* If default, use 'this' NIXLF's PFFUNC */ 1252 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1253 req->sso_func = pcifunc; 1254 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1255 return NIX_AF_INVAL_SSO_PF_FUNC; 1256 } 1257 1258 /* If RSS is being enabled, check if requested config is valid. 1259 * RSS table size should be power of two, otherwise 1260 * RSS_GRP::OFFSET + adder might go beyond that group or 1261 * won't be able to use entire table. 
1262 */ 1263 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1264 !is_power_of_2(req->rss_sz))) 1265 return NIX_AF_ERR_RSS_SIZE_INVALID; 1266 1267 if (req->rss_sz && 1268 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1269 return NIX_AF_ERR_RSS_GRPS_INVALID; 1270 1271 /* Reset this NIX LF */ 1272 err = rvu_lf_reset(rvu, block, nixlf); 1273 if (err) { 1274 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1275 block->addr - BLKADDR_NIX0, nixlf); 1276 return NIX_AF_ERR_LF_RESET; 1277 } 1278 1279 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1280 1281 /* Alloc NIX RQ HW context memory and config the base */ 1282 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1283 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1284 if (err) 1285 goto free_mem; 1286 1287 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1288 if (!pfvf->rq_bmap) 1289 goto free_mem; 1290 1291 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1292 (u64)pfvf->rq_ctx->iova); 1293 1294 /* Set caching and queue count in HW */ 1295 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1296 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1297 1298 /* Alloc NIX SQ HW context memory and config the base */ 1299 hwctx_size = 1UL << (ctx_cfg & 0xF); 1300 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1301 if (err) 1302 goto free_mem; 1303 1304 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1305 if (!pfvf->sq_bmap) 1306 goto free_mem; 1307 1308 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1309 (u64)pfvf->sq_ctx->iova); 1310 1311 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1312 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1313 1314 /* Alloc NIX CQ HW context memory and config the base */ 1315 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1316 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1317 if (err) 1318 goto free_mem; 1319 1320 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1321 if (!pfvf->cq_bmap) 1322 goto free_mem; 1323 1324 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1325 (u64)pfvf->cq_ctx->iova); 1326 1327 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1328 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1329 1330 /* Initialize receive side scaling (RSS) */ 1331 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1332 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1333 req->rss_grps, hwctx_size, req->way_mask, 1334 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); 1335 if (err) 1336 goto free_mem; 1337 1338 /* Alloc memory for CQINT's HW contexts */ 1339 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1340 qints = (cfg >> 24) & 0xFFF; 1341 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1342 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1343 if (err) 1344 goto free_mem; 1345 1346 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1347 (u64)pfvf->cq_ints_ctx->iova); 1348 1349 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1350 BIT_ULL(36) | req->way_mask << 20); 1351 1352 /* Alloc memory for QINT's HW contexts */ 1353 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1354 qints = (cfg >> 12) & 0xFFF; 1355 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1356 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1357 if (err) 1358 goto free_mem; 1359 1360 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1361 (u64)pfvf->nix_qints_ctx->iova); 1362 rvu_write64(rvu, 
blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1363 BIT_ULL(36) | req->way_mask << 20); 1364 1365 /* Setup VLANX TPID's. 1366 * Use VLAN1 for 802.1Q 1367 * and VLAN0 for 802.1AD. 1368 */ 1369 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1370 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1371 1372 /* Enable LMTST for this NIX LF */ 1373 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1374 1375 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1376 if (req->npa_func) 1377 cfg = req->npa_func; 1378 if (req->sso_func) 1379 cfg |= (u64)req->sso_func << 16; 1380 1381 cfg |= (u64)req->xqe_sz << 33; 1382 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1383 1384 /* Config Rx pkt length, csum checks and apad enable / disable */ 1385 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1386 1387 /* Configure pkind for TX parse config */ 1388 cfg = NPC_TX_DEF_PKIND; 1389 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1390 1391 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1392 if (is_sdp_pfvf(pcifunc)) 1393 intf = NIX_INTF_TYPE_SDP; 1394 1395 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, 1396 !!(req->flags & NIX_LF_LBK_BLK_SEL)); 1397 if (err) 1398 goto free_mem; 1399 1400 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1401 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1402 1403 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1404 rvu_write64(rvu, blkaddr, 1405 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1406 VTAGSIZE_T4 | VTAG_STRIP); 1407 1408 goto exit; 1409 1410 free_mem: 1411 nix_ctx_free(rvu, pfvf); 1412 rc = -ENOMEM; 1413 1414 exit: 1415 /* Set macaddr of this PF/VF */ 1416 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1417 1418 /* set SQB size info */ 1419 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1420 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1421 rsp->rx_chan_base = pfvf->rx_chan_base; 1422 rsp->tx_chan_base = pfvf->tx_chan_base; 1423 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1424 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1425 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1426 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1427 /* Get HW supported stat count */ 1428 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1429 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1430 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1431 /* Get count of CQ IRQs and error IRQs supported per LF */ 1432 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1433 rsp->qints = ((cfg >> 12) & 0xFFF); 1434 rsp->cints = ((cfg >> 24) & 0xFFF); 1435 rsp->cgx_links = hw->cgx_links; 1436 rsp->lbk_links = hw->lbk_links; 1437 rsp->sdp_links = hw->sdp_links; 1438 1439 return rc; 1440 } 1441 1442 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1443 struct msg_rsp *rsp) 1444 { 1445 struct rvu_hwinfo *hw = rvu->hw; 1446 u16 pcifunc = req->hdr.pcifunc; 1447 struct rvu_block *block; 1448 int blkaddr, nixlf, err; 1449 struct rvu_pfvf *pfvf; 1450 1451 pfvf = rvu_get_pfvf(rvu, pcifunc); 1452 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1453 if (!pfvf->nixlf || blkaddr < 0) 1454 return NIX_AF_ERR_AF_LF_INVALID; 1455 1456 block = &hw->block[blkaddr]; 1457 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1458 if (nixlf < 0) 1459 return NIX_AF_ERR_AF_LF_INVALID; 1460 1461 if (req->flags & NIX_LF_DISABLE_FLOWS) 1462 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1463 else 1464 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1465 1466 /* Free any tx vtag def entries used by this 
NIX LF */ 1467 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1468 nix_free_tx_vtag_entries(rvu, pcifunc); 1469 1470 nix_interface_deinit(rvu, pcifunc, nixlf); 1471 1472 /* Reset this NIX LF */ 1473 err = rvu_lf_reset(rvu, block, nixlf); 1474 if (err) { 1475 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1476 block->addr - BLKADDR_NIX0, nixlf); 1477 return NIX_AF_ERR_LF_RESET; 1478 } 1479 1480 nix_ctx_free(rvu, pfvf); 1481 1482 return 0; 1483 } 1484 1485 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1486 struct nix_mark_format_cfg *req, 1487 struct nix_mark_format_cfg_rsp *rsp) 1488 { 1489 u16 pcifunc = req->hdr.pcifunc; 1490 struct nix_hw *nix_hw; 1491 struct rvu_pfvf *pfvf; 1492 int blkaddr, rc; 1493 u32 cfg; 1494 1495 pfvf = rvu_get_pfvf(rvu, pcifunc); 1496 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1497 if (!pfvf->nixlf || blkaddr < 0) 1498 return NIX_AF_ERR_AF_LF_INVALID; 1499 1500 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1501 if (!nix_hw) 1502 return NIX_AF_ERR_INVALID_NIXBLK; 1503 1504 cfg = (((u32)req->offset & 0x7) << 16) | 1505 (((u32)req->y_mask & 0xF) << 12) | 1506 (((u32)req->y_val & 0xF) << 8) | 1507 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1508 1509 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1510 if (rc < 0) { 1511 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1512 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1513 return NIX_AF_ERR_MARK_CFG_FAIL; 1514 } 1515 1516 rsp->mark_format_idx = rc; 1517 return 0; 1518 } 1519 1520 /* Handle shaper update specially for few revisions */ 1521 static bool 1522 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, 1523 int lvl, u64 reg, u64 regval) 1524 { 1525 u64 regbase, oldval, sw_xoff = 0; 1526 u64 dbgval, md_debug0 = 0; 1527 unsigned long poll_tmo; 1528 bool rate_reg = 0; 1529 u32 schq; 1530 1531 regbase = reg & 0xFFFF; 1532 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1533 1534 /* Check for rate register */ 1535 switch (lvl) { 1536 case NIX_TXSCH_LVL_TL1: 1537 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); 1538 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); 1539 1540 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); 1541 break; 1542 case NIX_TXSCH_LVL_TL2: 1543 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); 1544 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); 1545 1546 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || 1547 regbase == NIX_AF_TL2X_PIR(0)); 1548 break; 1549 case NIX_TXSCH_LVL_TL3: 1550 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); 1551 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); 1552 1553 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || 1554 regbase == NIX_AF_TL3X_PIR(0)); 1555 break; 1556 case NIX_TXSCH_LVL_TL4: 1557 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); 1558 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); 1559 1560 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || 1561 regbase == NIX_AF_TL4X_PIR(0)); 1562 break; 1563 case NIX_TXSCH_LVL_MDQ: 1564 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); 1565 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || 1566 regbase == NIX_AF_MDQX_PIR(0)); 1567 break; 1568 } 1569 1570 if (!rate_reg) 1571 return false; 1572 1573 /* Nothing special to do when state is not toggled */ 1574 oldval = rvu_read64(rvu, blkaddr, reg); 1575 if ((oldval & 0x1) == (regval & 0x1)) { 1576 rvu_write64(rvu, blkaddr, reg, regval); 1577 return true; 1578 } 1579 1580 /* PIR/CIR disable */ 1581 if (!(regval & 0x1)) { 1582 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1583 rvu_write64(rvu, blkaddr, reg, 0); 1584 udelay(4); 1585 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1586 return true; 1587 } 1588 1589 /* PIR/CIR 
enable */ 1590 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1591 if (md_debug0) { 1592 poll_tmo = jiffies + usecs_to_jiffies(10000); 1593 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ 1594 do { 1595 if (time_after(jiffies, poll_tmo)) { 1596 dev_err(rvu->dev, 1597 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", 1598 nixlf, schq, lvl); 1599 goto exit; 1600 } 1601 usleep_range(1, 5); 1602 dbgval = rvu_read64(rvu, blkaddr, md_debug0); 1603 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); 1604 } 1605 rvu_write64(rvu, blkaddr, reg, regval); 1606 exit: 1607 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1608 return true; 1609 } 1610 1611 /* Disable shaping of pkts by a scheduler queue 1612 * at a given scheduler level. 1613 */ 1614 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1615 int nixlf, int lvl, int schq) 1616 { 1617 struct rvu_hwinfo *hw = rvu->hw; 1618 u64 cir_reg = 0, pir_reg = 0; 1619 u64 cfg; 1620 1621 switch (lvl) { 1622 case NIX_TXSCH_LVL_TL1: 1623 cir_reg = NIX_AF_TL1X_CIR(schq); 1624 pir_reg = 0; /* PIR not available at TL1 */ 1625 break; 1626 case NIX_TXSCH_LVL_TL2: 1627 cir_reg = NIX_AF_TL2X_CIR(schq); 1628 pir_reg = NIX_AF_TL2X_PIR(schq); 1629 break; 1630 case NIX_TXSCH_LVL_TL3: 1631 cir_reg = NIX_AF_TL3X_CIR(schq); 1632 pir_reg = NIX_AF_TL3X_PIR(schq); 1633 break; 1634 case NIX_TXSCH_LVL_TL4: 1635 cir_reg = NIX_AF_TL4X_CIR(schq); 1636 pir_reg = NIX_AF_TL4X_PIR(schq); 1637 break; 1638 case NIX_TXSCH_LVL_MDQ: 1639 cir_reg = NIX_AF_MDQX_CIR(schq); 1640 pir_reg = NIX_AF_MDQX_PIR(schq); 1641 break; 1642 } 1643 1644 /* Shaper state toggle needs wait/poll */ 1645 if (hw->cap.nix_shaper_toggle_wait) { 1646 if (cir_reg) 1647 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1648 lvl, cir_reg, 0); 1649 if (pir_reg) 1650 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1651 lvl, pir_reg, 0); 1652 return; 1653 } 1654 1655 if (!cir_reg) 1656 return; 1657 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1658 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1659 1660 if (!pir_reg) 1661 return; 1662 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1663 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1664 } 1665 1666 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1667 int lvl, int schq) 1668 { 1669 struct rvu_hwinfo *hw = rvu->hw; 1670 int link_level; 1671 int link; 1672 1673 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1674 return; 1675 1676 /* Reset TL4's SDP link config */ 1677 if (lvl == NIX_TXSCH_LVL_TL4) 1678 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1679 1680 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
1681 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1682 if (lvl != link_level) 1683 return; 1684 1685 /* Reset TL2's CGX or LBK link config */ 1686 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1687 rvu_write64(rvu, blkaddr, 1688 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1689 } 1690 1691 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 1692 int lvl, int schq) 1693 { 1694 struct rvu_hwinfo *hw = rvu->hw; 1695 u64 reg; 1696 1697 /* Skip this if shaping is not supported */ 1698 if (!hw->cap.nix_shaping) 1699 return; 1700 1701 /* Clear level specific SW_XOFF */ 1702 switch (lvl) { 1703 case NIX_TXSCH_LVL_TL1: 1704 reg = NIX_AF_TL1X_SW_XOFF(schq); 1705 break; 1706 case NIX_TXSCH_LVL_TL2: 1707 reg = NIX_AF_TL2X_SW_XOFF(schq); 1708 break; 1709 case NIX_TXSCH_LVL_TL3: 1710 reg = NIX_AF_TL3X_SW_XOFF(schq); 1711 break; 1712 case NIX_TXSCH_LVL_TL4: 1713 reg = NIX_AF_TL4X_SW_XOFF(schq); 1714 break; 1715 case NIX_TXSCH_LVL_MDQ: 1716 reg = NIX_AF_MDQX_SW_XOFF(schq); 1717 break; 1718 default: 1719 return; 1720 } 1721 1722 rvu_write64(rvu, blkaddr, reg, 0x0); 1723 } 1724 1725 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1726 { 1727 struct rvu_hwinfo *hw = rvu->hw; 1728 int pf = rvu_get_pf(pcifunc); 1729 u8 cgx_id = 0, lmac_id = 0; 1730 1731 if (is_afvf(pcifunc)) {/* LBK links */ 1732 return hw->cgx_links; 1733 } else if (is_pf_cgxmapped(rvu, pf)) { 1734 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1735 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1736 } 1737 1738 /* SDP link */ 1739 return hw->cgx_links + hw->lbk_links; 1740 } 1741 1742 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1743 int link, int *start, int *end) 1744 { 1745 struct rvu_hwinfo *hw = rvu->hw; 1746 int pf = rvu_get_pf(pcifunc); 1747 1748 if (is_afvf(pcifunc)) { /* LBK links */ 1749 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1750 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1751 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1752 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1753 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1754 } else { /* SDP link */ 1755 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1756 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1757 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1758 } 1759 } 1760 1761 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1762 struct nix_hw *nix_hw, 1763 struct nix_txsch_alloc_req *req) 1764 { 1765 struct rvu_hwinfo *hw = rvu->hw; 1766 int schq, req_schq, free_cnt; 1767 struct nix_txsch *txsch; 1768 int link, start, end; 1769 1770 txsch = &nix_hw->txsch[lvl]; 1771 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1772 1773 if (!req_schq) 1774 return 0; 1775 1776 link = nix_get_tx_link(rvu, pcifunc); 1777 1778 /* For traffic aggregating scheduler level, one queue is enough */ 1779 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1780 if (req_schq != 1) 1781 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1782 return 0; 1783 } 1784 1785 /* Get free SCHQ count and check if request can be accomodated */ 1786 if (hw->cap.nix_fixed_txschq_mapping) { 1787 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1788 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1789 if (end <= txsch->schq.max && schq < end && 1790 !test_bit(schq, txsch->schq.bmap)) 1791 free_cnt = 1; 1792 else 1793 free_cnt = 0; 1794 } else { 1795 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1796 } 1797 1798 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) 1799 return NIX_AF_ERR_TLX_ALLOC_FAIL; 
1800 1801 /* If contiguous queues are needed, check for availability */ 1802 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1803 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1804 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1805 1806 return 0; 1807 } 1808 1809 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1810 struct nix_txsch_alloc_rsp *rsp, 1811 int lvl, int start, int end) 1812 { 1813 struct rvu_hwinfo *hw = rvu->hw; 1814 u16 pcifunc = rsp->hdr.pcifunc; 1815 int idx, schq; 1816 1817 /* For traffic aggregating levels, queue alloc is based 1818 * on transmit link to which PF_FUNC is mapped to. 1819 */ 1820 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1821 /* A single TL queue is allocated */ 1822 if (rsp->schq_contig[lvl]) { 1823 rsp->schq_contig[lvl] = 1; 1824 rsp->schq_contig_list[lvl][0] = start; 1825 } 1826 1827 /* Both contig and non-contig reqs doesn't make sense here */ 1828 if (rsp->schq_contig[lvl]) 1829 rsp->schq[lvl] = 0; 1830 1831 if (rsp->schq[lvl]) { 1832 rsp->schq[lvl] = 1; 1833 rsp->schq_list[lvl][0] = start; 1834 } 1835 return; 1836 } 1837 1838 /* Adjust the queue request count if HW supports 1839 * only one queue per level configuration. 1840 */ 1841 if (hw->cap.nix_fixed_txschq_mapping) { 1842 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1843 schq = start + idx; 1844 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1845 rsp->schq_contig[lvl] = 0; 1846 rsp->schq[lvl] = 0; 1847 return; 1848 } 1849 1850 if (rsp->schq_contig[lvl]) { 1851 rsp->schq_contig[lvl] = 1; 1852 set_bit(schq, txsch->schq.bmap); 1853 rsp->schq_contig_list[lvl][0] = schq; 1854 rsp->schq[lvl] = 0; 1855 } else if (rsp->schq[lvl]) { 1856 rsp->schq[lvl] = 1; 1857 set_bit(schq, txsch->schq.bmap); 1858 rsp->schq_list[lvl][0] = schq; 1859 } 1860 return; 1861 } 1862 1863 /* Allocate contiguous queue indices requesty first */ 1864 if (rsp->schq_contig[lvl]) { 1865 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 1866 txsch->schq.max, start, 1867 rsp->schq_contig[lvl], 0); 1868 if (schq >= end) 1869 rsp->schq_contig[lvl] = 0; 1870 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 1871 set_bit(schq, txsch->schq.bmap); 1872 rsp->schq_contig_list[lvl][idx] = schq; 1873 schq++; 1874 } 1875 } 1876 1877 /* Allocate non-contiguous queue indices */ 1878 if (rsp->schq[lvl]) { 1879 idx = 0; 1880 for (schq = start; schq < end; schq++) { 1881 if (!test_bit(schq, txsch->schq.bmap)) { 1882 set_bit(schq, txsch->schq.bmap); 1883 rsp->schq_list[lvl][idx++] = schq; 1884 } 1885 if (idx == rsp->schq[lvl]) 1886 break; 1887 } 1888 /* Update how many were allocated */ 1889 rsp->schq[lvl] = idx; 1890 } 1891 } 1892 1893 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 1894 struct nix_txsch_alloc_req *req, 1895 struct nix_txsch_alloc_rsp *rsp) 1896 { 1897 struct rvu_hwinfo *hw = rvu->hw; 1898 u16 pcifunc = req->hdr.pcifunc; 1899 int link, blkaddr, rc = 0; 1900 int lvl, idx, start, end; 1901 struct nix_txsch *txsch; 1902 struct nix_hw *nix_hw; 1903 u32 *pfvf_map; 1904 int nixlf; 1905 u16 schq; 1906 1907 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1908 if (rc) 1909 return rc; 1910 1911 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1912 if (!nix_hw) 1913 return NIX_AF_ERR_INVALID_NIXBLK; 1914 1915 mutex_lock(&rvu->rsrc_lock); 1916 1917 /* Check if request is valid as per HW capabilities 1918 * and can be accomodated. 
1919 */ 1920 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1921 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1922 if (rc) 1923 goto err; 1924 } 1925 1926 /* Allocate requested Tx scheduler queues */ 1927 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1928 txsch = &nix_hw->txsch[lvl]; 1929 pfvf_map = txsch->pfvf_map; 1930 1931 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1932 continue; 1933 1934 rsp->schq[lvl] = req->schq[lvl]; 1935 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1936 1937 link = nix_get_tx_link(rvu, pcifunc); 1938 1939 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1940 start = link; 1941 end = link; 1942 } else if (hw->cap.nix_fixed_txschq_mapping) { 1943 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1944 } else { 1945 start = 0; 1946 end = txsch->schq.max; 1947 } 1948 1949 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1950 1951 /* Reset queue config */ 1952 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1953 schq = rsp->schq_contig_list[lvl][idx]; 1954 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1955 NIX_TXSCHQ_CFG_DONE)) 1956 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1957 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1958 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 1959 } 1960 1961 for (idx = 0; idx < req->schq[lvl]; idx++) { 1962 schq = rsp->schq_list[lvl][idx]; 1963 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1964 NIX_TXSCHQ_CFG_DONE)) 1965 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1966 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1967 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 1968 } 1969 } 1970 1971 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1972 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1973 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1974 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1975 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1976 goto exit; 1977 err: 1978 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1979 exit: 1980 mutex_unlock(&rvu->rsrc_lock); 1981 return rc; 1982 } 1983 1984 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 1985 int smq, u16 pcifunc, int nixlf) 1986 { 1987 int pf = rvu_get_pf(pcifunc); 1988 u8 cgx_id = 0, lmac_id = 0; 1989 int err, restore_tx_en = 0; 1990 u64 cfg; 1991 1992 /* enable cgx tx if disabled */ 1993 if (is_pf_cgxmapped(rvu, pf)) { 1994 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1995 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1996 lmac_id, true); 1997 } 1998 1999 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2000 /* Do SMQ flush and set enqueue xoff */ 2001 cfg |= BIT_ULL(50) | BIT_ULL(49); 2002 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2003 2004 /* Disable backpressure from physical link, 2005 * otherwise SMQ flush may stall. 
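 * For context, the full flush handshake in this function is:
 *	1. re-enable LMAC Tx (above) so in-flight packets can drain
 *	2. set the flush and enqueue-xoff bits in NIX_AF_SMQX_CFG (above)
 *	3. drop link Rx backpressure here
 *	4. poll NIX_AF_SMQX_CFG until HW clears the flush bit
 *	5. restore backpressure and the saved LMAC Tx state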
2006 */ 2007 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2008 2009 /* Wait for flush to complete */ 2010 err = rvu_poll_reg(rvu, blkaddr, 2011 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2012 if (err) 2013 dev_err(rvu->dev, 2014 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); 2015 2016 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2017 /* restore cgx tx state */ 2018 if (restore_tx_en) 2019 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2020 return err; 2021 } 2022 2023 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2024 { 2025 int blkaddr, nixlf, lvl, schq, err; 2026 struct rvu_hwinfo *hw = rvu->hw; 2027 struct nix_txsch *txsch; 2028 struct nix_hw *nix_hw; 2029 u16 map_func; 2030 2031 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2032 if (blkaddr < 0) 2033 return NIX_AF_ERR_AF_LF_INVALID; 2034 2035 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2036 if (!nix_hw) 2037 return NIX_AF_ERR_INVALID_NIXBLK; 2038 2039 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2040 if (nixlf < 0) 2041 return NIX_AF_ERR_AF_LF_INVALID; 2042 2043 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2044 mutex_lock(&rvu->rsrc_lock); 2045 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2046 txsch = &nix_hw->txsch[lvl]; 2047 2048 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2049 continue; 2050 2051 for (schq = 0; schq < txsch->schq.max; schq++) { 2052 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2053 continue; 2054 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2055 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2056 } 2057 } 2058 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2059 nix_get_tx_link(rvu, pcifunc)); 2060 2061 /* On PF cleanup, clear cfg done flag as 2062 * PF would have changed default config. 2063 */ 2064 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2065 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2066 schq = nix_get_tx_link(rvu, pcifunc); 2067 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2068 * VF might be using this TL1 queue 2069 */ 2070 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2071 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2072 } 2073 2074 /* Flush SMQs */ 2075 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2076 for (schq = 0; schq < txsch->schq.max; schq++) { 2077 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2078 continue; 2079 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2080 } 2081 2082 /* Now free scheduler queues to free pool */ 2083 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2084 /* TLs above aggregation level are shared across all PF 2085 * and it's VFs, hence skip freeing them. 
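 * For example, with an aggregation level of NIX_TXSCH_LVL_TL1 only the
 * TL1 queue (which stays bound to the transmit link) is skipped here,
 * while SMQ/MDQ, TL4, TL3 and TL2 queues are returned to the free pool.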
2086 */
2087 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2088 continue;
2089
2090 txsch = &nix_hw->txsch[lvl];
2091 for (schq = 0; schq < txsch->schq.max; schq++) {
2092 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2093 continue;
2094 rvu_free_rsrc(&txsch->schq, schq);
2095 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2096 }
2097 }
2098 mutex_unlock(&rvu->rsrc_lock);
2099
2100 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2101 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2102 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2103 if (err)
2104 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2105
2106 return 0;
2107 }
2108
2109 static int nix_txschq_free_one(struct rvu *rvu,
2110 struct nix_txsch_free_req *req)
2111 {
2112 struct rvu_hwinfo *hw = rvu->hw;
2113 u16 pcifunc = req->hdr.pcifunc;
2114 int lvl, schq, nixlf, blkaddr;
2115 struct nix_txsch *txsch;
2116 struct nix_hw *nix_hw;
2117 u32 *pfvf_map;
2118 int rc;
2119
2120 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2121 if (blkaddr < 0)
2122 return NIX_AF_ERR_AF_LF_INVALID;
2123
2124 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2125 if (!nix_hw)
2126 return NIX_AF_ERR_INVALID_NIXBLK;
2127
2128 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2129 if (nixlf < 0)
2130 return NIX_AF_ERR_AF_LF_INVALID;
2131
2132 lvl = req->schq_lvl;
2133 schq = req->schq;
2134 txsch = &nix_hw->txsch[lvl];
2135
2136 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2137 return 0;
2138
2139 pfvf_map = txsch->pfvf_map;
2140 mutex_lock(&rvu->rsrc_lock);
2141
2142 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2143 rc = NIX_AF_ERR_TLX_INVALID;
2144 goto err;
2145 }
2146
2147 /* Clear SW_XOFF of this resource only.
2148 * For SMQ level, all XOFFs in the
2149 * path must be cleared by the user
2150 */
2151 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2152
2153 /* Flush if it is an SMQ.
Onus of disabling 2154 * TL2/3 queue links before SMQ flush is on user 2155 */ 2156 if (lvl == NIX_TXSCH_LVL_SMQ && 2157 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2158 rc = NIX_AF_SMQ_FLUSH_FAILED; 2159 goto err; 2160 } 2161 2162 /* Free the resource */ 2163 rvu_free_rsrc(&txsch->schq, schq); 2164 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2165 mutex_unlock(&rvu->rsrc_lock); 2166 return 0; 2167 err: 2168 mutex_unlock(&rvu->rsrc_lock); 2169 return rc; 2170 } 2171 2172 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2173 struct nix_txsch_free_req *req, 2174 struct msg_rsp *rsp) 2175 { 2176 if (req->flags & TXSCHQ_FREE_ALL) 2177 return nix_txschq_free(rvu, req->hdr.pcifunc); 2178 else 2179 return nix_txschq_free_one(rvu, req); 2180 } 2181 2182 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2183 int lvl, u64 reg, u64 regval) 2184 { 2185 u64 regbase = reg & 0xFFFF; 2186 u16 schq, parent; 2187 2188 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2189 return false; 2190 2191 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2192 /* Check if this schq belongs to this PF/VF or not */ 2193 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2194 return false; 2195 2196 parent = (regval >> 16) & 0x1FF; 2197 /* Validate MDQ's TL4 parent */ 2198 if (regbase == NIX_AF_MDQX_PARENT(0) && 2199 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2200 return false; 2201 2202 /* Validate TL4's TL3 parent */ 2203 if (regbase == NIX_AF_TL4X_PARENT(0) && 2204 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2205 return false; 2206 2207 /* Validate TL3's TL2 parent */ 2208 if (regbase == NIX_AF_TL3X_PARENT(0) && 2209 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2210 return false; 2211 2212 /* Validate TL2's TL1 parent */ 2213 if (regbase == NIX_AF_TL2X_PARENT(0) && 2214 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2215 return false; 2216 2217 return true; 2218 } 2219 2220 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2221 { 2222 u64 regbase; 2223 2224 if (hw->cap.nix_shaping) 2225 return true; 2226 2227 /* If shaping and coloring is not supported, then 2228 * *_CIR and *_PIR registers should not be configured. 
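 * A write to e.g. NIX_AF_TL2X_PIR(schq) is therefore silently skipped
 * by the mailbox handler on such silicon: the handler continues past
 * that register instead of failing the whole request.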
2229 */ 2230 regbase = reg & 0xFFFF; 2231 2232 switch (lvl) { 2233 case NIX_TXSCH_LVL_TL1: 2234 if (regbase == NIX_AF_TL1X_CIR(0)) 2235 return false; 2236 break; 2237 case NIX_TXSCH_LVL_TL2: 2238 if (regbase == NIX_AF_TL2X_CIR(0) || 2239 regbase == NIX_AF_TL2X_PIR(0)) 2240 return false; 2241 break; 2242 case NIX_TXSCH_LVL_TL3: 2243 if (regbase == NIX_AF_TL3X_CIR(0) || 2244 regbase == NIX_AF_TL3X_PIR(0)) 2245 return false; 2246 break; 2247 case NIX_TXSCH_LVL_TL4: 2248 if (regbase == NIX_AF_TL4X_CIR(0) || 2249 regbase == NIX_AF_TL4X_PIR(0)) 2250 return false; 2251 break; 2252 case NIX_TXSCH_LVL_MDQ: 2253 if (regbase == NIX_AF_MDQX_CIR(0) || 2254 regbase == NIX_AF_MDQX_PIR(0)) 2255 return false; 2256 break; 2257 } 2258 return true; 2259 } 2260 2261 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2262 u16 pcifunc, int blkaddr) 2263 { 2264 u32 *pfvf_map; 2265 int schq; 2266 2267 schq = nix_get_tx_link(rvu, pcifunc); 2268 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2269 /* Skip if PF has already done the config */ 2270 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2271 return; 2272 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2273 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2274 2275 /* On OcteonTx2 the config was in bytes and newer silcons 2276 * it's changed to weight. 2277 */ 2278 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2279 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2280 TXSCH_TL1_DFLT_RR_QTM); 2281 else 2282 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2283 CN10K_MAX_DWRR_WEIGHT); 2284 2285 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2286 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2287 } 2288 2289 /* Register offset - [15:0] 2290 * Scheduler Queue number - [25:16] 2291 */ 2292 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2293 2294 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2295 int blkaddr, struct nix_txschq_config *req, 2296 struct nix_txschq_config *rsp) 2297 { 2298 u16 pcifunc = req->hdr.pcifunc; 2299 int idx, schq; 2300 u64 reg; 2301 2302 for (idx = 0; idx < req->num_regs; idx++) { 2303 reg = req->reg[idx]; 2304 reg &= NIX_TX_SCHQ_MASK; 2305 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2306 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2307 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2308 return NIX_AF_INVAL_TXSCHQ_CFG; 2309 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2310 } 2311 rsp->lvl = req->lvl; 2312 rsp->num_regs = req->num_regs; 2313 return 0; 2314 } 2315 2316 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, 2317 u16 pcifunc, struct nix_txsch *txsch) 2318 { 2319 struct rvu_hwinfo *hw = rvu->hw; 2320 int lbk_link_start, lbk_links; 2321 u8 pf = rvu_get_pf(pcifunc); 2322 int schq; 2323 2324 if (!is_pf_cgxmapped(rvu, pf)) 2325 return; 2326 2327 lbk_link_start = hw->cgx_links; 2328 2329 for (schq = 0; schq < txsch->schq.max; schq++) { 2330 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2331 continue; 2332 /* Enable all LBK links with channel 63 by default so that 2333 * packets can be sent to LBK with a NPC TX MCAM rule 2334 */ 2335 lbk_links = hw->lbk_links; 2336 while (lbk_links--) 2337 rvu_write64(rvu, blkaddr, 2338 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2339 lbk_link_start + 2340 lbk_links), 2341 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); 2342 } 2343 } 2344 2345 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2346 struct nix_txschq_config *req, 2347 struct nix_txschq_config *rsp) 2348 { 2349 u64 reg, val, regval, 
schq_regbase, val_mask; 2350 struct rvu_hwinfo *hw = rvu->hw; 2351 u16 pcifunc = req->hdr.pcifunc; 2352 struct nix_txsch *txsch; 2353 struct nix_hw *nix_hw; 2354 int blkaddr, idx, err; 2355 int nixlf, schq; 2356 u32 *pfvf_map; 2357 2358 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2359 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2360 return NIX_AF_INVAL_TXSCHQ_CFG; 2361 2362 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2363 if (err) 2364 return err; 2365 2366 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2367 if (!nix_hw) 2368 return NIX_AF_ERR_INVALID_NIXBLK; 2369 2370 if (req->read) 2371 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2372 2373 txsch = &nix_hw->txsch[req->lvl]; 2374 pfvf_map = txsch->pfvf_map; 2375 2376 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2377 pcifunc & RVU_PFVF_FUNC_MASK) { 2378 mutex_lock(&rvu->rsrc_lock); 2379 if (req->lvl == NIX_TXSCH_LVL_TL1) 2380 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2381 mutex_unlock(&rvu->rsrc_lock); 2382 return 0; 2383 } 2384 2385 for (idx = 0; idx < req->num_regs; idx++) { 2386 reg = req->reg[idx]; 2387 reg &= NIX_TX_SCHQ_MASK; 2388 regval = req->regval[idx]; 2389 schq_regbase = reg & 0xFFFF; 2390 val_mask = req->regval_mask[idx]; 2391 2392 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2393 txsch->lvl, reg, regval)) 2394 return NIX_AF_INVAL_TXSCHQ_CFG; 2395 2396 /* Check if shaping and coloring is supported */ 2397 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2398 continue; 2399 2400 val = rvu_read64(rvu, blkaddr, reg); 2401 regval = (val & val_mask) | (regval & ~val_mask); 2402 2403 /* Handle shaping state toggle specially */ 2404 if (hw->cap.nix_shaper_toggle_wait && 2405 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2406 req->lvl, reg, regval)) 2407 continue; 2408 2409 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2410 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2411 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2412 pcifunc, 0); 2413 regval &= ~(0x7FULL << 24); 2414 regval |= ((u64)nixlf << 24); 2415 } 2416 2417 /* Clear 'BP_ENA' config, if it's not allowed */ 2418 if (!hw->cap.nix_tx_link_bp) { 2419 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2420 (schq_regbase & 0xFF00) == 2421 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2422 regval &= ~BIT_ULL(13); 2423 } 2424 2425 /* Mark config as done for TL1 by PF */ 2426 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2427 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2428 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2429 mutex_lock(&rvu->rsrc_lock); 2430 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2431 NIX_TXSCHQ_CFG_DONE); 2432 mutex_unlock(&rvu->rsrc_lock); 2433 } 2434 2435 /* SMQ flush is special hence split register writes such 2436 * that flush first and write rest of the bits later. 
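 * i.e. when the caller writes NIX_AF_SMQX_CFG with the flush bit (49)
 * set, nix_smq_flush() runs first and the remaining bits are written
 * afterwards with bit 49 cleared.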
2437 */ 2438 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2439 (regval & BIT_ULL(49))) { 2440 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2441 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2442 regval &= ~BIT_ULL(49); 2443 } 2444 rvu_write64(rvu, blkaddr, reg, regval); 2445 } 2446 2447 rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, 2448 &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); 2449 return 0; 2450 } 2451 2452 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2453 struct nix_vtag_config *req) 2454 { 2455 u64 regval = req->vtag_size; 2456 2457 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2458 req->vtag_size > VTAGSIZE_T8) 2459 return -EINVAL; 2460 2461 /* RX VTAG Type 7 reserved for vf vlan */ 2462 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2463 return NIX_AF_ERR_RX_VTAG_INUSE; 2464 2465 if (req->rx.capture_vtag) 2466 regval |= BIT_ULL(5); 2467 if (req->rx.strip_vtag) 2468 regval |= BIT_ULL(4); 2469 2470 rvu_write64(rvu, blkaddr, 2471 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2472 return 0; 2473 } 2474 2475 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2476 u16 pcifunc, int index) 2477 { 2478 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2479 struct nix_txvlan *vlan; 2480 2481 if (!nix_hw) 2482 return NIX_AF_ERR_INVALID_NIXBLK; 2483 2484 vlan = &nix_hw->txvlan; 2485 if (vlan->entry2pfvf_map[index] != pcifunc) 2486 return NIX_AF_ERR_PARAM; 2487 2488 rvu_write64(rvu, blkaddr, 2489 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2490 rvu_write64(rvu, blkaddr, 2491 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2492 2493 vlan->entry2pfvf_map[index] = 0; 2494 rvu_free_rsrc(&vlan->rsrc, index); 2495 2496 return 0; 2497 } 2498 2499 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2500 { 2501 struct nix_txvlan *vlan; 2502 struct nix_hw *nix_hw; 2503 int index, blkaddr; 2504 2505 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2506 if (blkaddr < 0) 2507 return; 2508 2509 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2510 vlan = &nix_hw->txvlan; 2511 2512 mutex_lock(&vlan->rsrc_lock); 2513 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2514 for (index = 0; index < vlan->rsrc.max; index++) { 2515 if (vlan->entry2pfvf_map[index] == pcifunc) 2516 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2517 } 2518 mutex_unlock(&vlan->rsrc_lock); 2519 } 2520 2521 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2522 u64 vtag, u8 size) 2523 { 2524 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2525 struct nix_txvlan *vlan; 2526 u64 regval; 2527 int index; 2528 2529 if (!nix_hw) 2530 return NIX_AF_ERR_INVALID_NIXBLK; 2531 2532 vlan = &nix_hw->txvlan; 2533 2534 mutex_lock(&vlan->rsrc_lock); 2535 2536 index = rvu_alloc_rsrc(&vlan->rsrc); 2537 if (index < 0) { 2538 mutex_unlock(&vlan->rsrc_lock); 2539 return index; 2540 } 2541 2542 mutex_unlock(&vlan->rsrc_lock); 2543 2544 regval = size ? 
vtag : vtag << 32; 2545 2546 rvu_write64(rvu, blkaddr, 2547 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2548 rvu_write64(rvu, blkaddr, 2549 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2550 2551 return index; 2552 } 2553 2554 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2555 struct nix_vtag_config *req) 2556 { 2557 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2558 u16 pcifunc = req->hdr.pcifunc; 2559 int idx0 = req->tx.vtag0_idx; 2560 int idx1 = req->tx.vtag1_idx; 2561 struct nix_txvlan *vlan; 2562 int err = 0; 2563 2564 if (!nix_hw) 2565 return NIX_AF_ERR_INVALID_NIXBLK; 2566 2567 vlan = &nix_hw->txvlan; 2568 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2569 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2570 vlan->entry2pfvf_map[idx1] != pcifunc) 2571 return NIX_AF_ERR_PARAM; 2572 2573 mutex_lock(&vlan->rsrc_lock); 2574 2575 if (req->tx.free_vtag0) { 2576 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2577 if (err) 2578 goto exit; 2579 } 2580 2581 if (req->tx.free_vtag1) 2582 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2583 2584 exit: 2585 mutex_unlock(&vlan->rsrc_lock); 2586 return err; 2587 } 2588 2589 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2590 struct nix_vtag_config *req, 2591 struct nix_vtag_config_rsp *rsp) 2592 { 2593 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2594 struct nix_txvlan *vlan; 2595 u16 pcifunc = req->hdr.pcifunc; 2596 2597 if (!nix_hw) 2598 return NIX_AF_ERR_INVALID_NIXBLK; 2599 2600 vlan = &nix_hw->txvlan; 2601 if (req->tx.cfg_vtag0) { 2602 rsp->vtag0_idx = 2603 nix_tx_vtag_alloc(rvu, blkaddr, 2604 req->tx.vtag0, req->vtag_size); 2605 2606 if (rsp->vtag0_idx < 0) 2607 return NIX_AF_ERR_TX_VTAG_NOSPC; 2608 2609 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2610 } 2611 2612 if (req->tx.cfg_vtag1) { 2613 rsp->vtag1_idx = 2614 nix_tx_vtag_alloc(rvu, blkaddr, 2615 req->tx.vtag1, req->vtag_size); 2616 2617 if (rsp->vtag1_idx < 0) 2618 goto err_free; 2619 2620 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2621 } 2622 2623 return 0; 2624 2625 err_free: 2626 if (req->tx.cfg_vtag0) 2627 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2628 2629 return NIX_AF_ERR_TX_VTAG_NOSPC; 2630 } 2631 2632 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2633 struct nix_vtag_config *req, 2634 struct nix_vtag_config_rsp *rsp) 2635 { 2636 u16 pcifunc = req->hdr.pcifunc; 2637 int blkaddr, nixlf, err; 2638 2639 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2640 if (err) 2641 return err; 2642 2643 if (req->cfg_type) { 2644 /* rx vtag configuration */ 2645 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2646 if (err) 2647 return NIX_AF_ERR_PARAM; 2648 } else { 2649 /* tx vtag configuration */ 2650 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2651 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2652 return NIX_AF_ERR_PARAM; 2653 2654 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2655 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2656 2657 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2658 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2659 } 2660 2661 return 0; 2662 } 2663 2664 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2665 int mce, u8 op, u16 pcifunc, int next, bool eol) 2666 { 2667 struct nix_aq_enq_req aq_req; 2668 int err; 2669 2670 aq_req.hdr.pcifunc = 0; 2671 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2672 aq_req.op = op; 2673 aq_req.qidx = mce; 2674 2675 /* Use RSS with RSS index 0 */ 2676 aq_req.mce.op = 1; 2677 aq_req.mce.index = 0; 2678 aq_req.mce.eol = eol; 2679 aq_req.mce.pf_func = pcifunc; 
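/* Illustration only (assuming a PF with two VFs, all of them with
 * NIXLFs attached): nix_update_mce_list() rewrites the PF's broadcast
 * range as
 *	MCE[base + 0]: pf_func = PF,  next = base + 1, eol = 0
 *	MCE[base + 1]: pf_func = VF0, next = base + 2, eol = 0
 *	MCE[base + 2]: pf_func = VF1, next = base + 3, eol = 1
 * where base is pfvf->bcast_mce_idx and the last entry terminates the
 * chain via eol.
 */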
2680 aq_req.mce.next = next; 2681 2682 /* All fields valid */ 2683 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2684 2685 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2686 if (err) { 2687 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2688 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2689 return err; 2690 } 2691 return 0; 2692 } 2693 2694 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2695 u16 pcifunc, bool add) 2696 { 2697 struct mce *mce, *tail = NULL; 2698 bool delete = false; 2699 2700 /* Scan through the current list */ 2701 hlist_for_each_entry(mce, &mce_list->head, node) { 2702 /* If already exists, then delete */ 2703 if (mce->pcifunc == pcifunc && !add) { 2704 delete = true; 2705 break; 2706 } else if (mce->pcifunc == pcifunc && add) { 2707 /* entry already exists */ 2708 return 0; 2709 } 2710 tail = mce; 2711 } 2712 2713 if (delete) { 2714 hlist_del(&mce->node); 2715 kfree(mce); 2716 mce_list->count--; 2717 return 0; 2718 } 2719 2720 if (!add) 2721 return 0; 2722 2723 /* Add a new one to the list, at the tail */ 2724 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2725 if (!mce) 2726 return -ENOMEM; 2727 mce->pcifunc = pcifunc; 2728 if (!tail) 2729 hlist_add_head(&mce->node, &mce_list->head); 2730 else 2731 hlist_add_behind(&mce->node, &tail->node); 2732 mce_list->count++; 2733 return 0; 2734 } 2735 2736 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 2737 struct nix_mce_list *mce_list, 2738 int mce_idx, int mcam_index, bool add) 2739 { 2740 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 2741 struct npc_mcam *mcam = &rvu->hw->mcam; 2742 struct nix_mcast *mcast; 2743 struct nix_hw *nix_hw; 2744 struct mce *mce; 2745 2746 if (!mce_list) 2747 return -EINVAL; 2748 2749 /* Get this PF/VF func's MCE index */ 2750 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2751 2752 if (idx > (mce_idx + mce_list->max)) { 2753 dev_err(rvu->dev, 2754 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2755 __func__, idx, mce_list->max, 2756 pcifunc >> RVU_PFVF_PF_SHIFT); 2757 return -EINVAL; 2758 } 2759 2760 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 2761 if (err) 2762 return err; 2763 2764 mcast = &nix_hw->mcast; 2765 mutex_lock(&mcast->mce_lock); 2766 2767 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 2768 if (err) 2769 goto end; 2770 2771 /* Disable MCAM entry in NPC */ 2772 if (!mce_list->count) { 2773 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2774 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 2775 goto end; 2776 } 2777 2778 /* Dump the updated list to HW */ 2779 idx = mce_idx; 2780 last_idx = idx + mce_list->count - 1; 2781 hlist_for_each_entry(mce, &mce_list->head, node) { 2782 if (idx > last_idx) 2783 break; 2784 2785 next_idx = idx + 1; 2786 /* EOL should be set in last MCE */ 2787 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2788 mce->pcifunc, next_idx, 2789 (next_idx > last_idx) ? 
true : false); 2790 if (err) 2791 goto end; 2792 idx++; 2793 } 2794 2795 end: 2796 mutex_unlock(&mcast->mce_lock); 2797 return err; 2798 } 2799 2800 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 2801 struct nix_mce_list **mce_list, int *mce_idx) 2802 { 2803 struct rvu_hwinfo *hw = rvu->hw; 2804 struct rvu_pfvf *pfvf; 2805 2806 if (!hw->cap.nix_rx_multicast || 2807 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 2808 *mce_list = NULL; 2809 *mce_idx = 0; 2810 return; 2811 } 2812 2813 /* Get this PF/VF func's MCE index */ 2814 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2815 2816 if (type == NIXLF_BCAST_ENTRY) { 2817 *mce_list = &pfvf->bcast_mce_list; 2818 *mce_idx = pfvf->bcast_mce_idx; 2819 } else if (type == NIXLF_ALLMULTI_ENTRY) { 2820 *mce_list = &pfvf->mcast_mce_list; 2821 *mce_idx = pfvf->mcast_mce_idx; 2822 } else if (type == NIXLF_PROMISC_ENTRY) { 2823 *mce_list = &pfvf->promisc_mce_list; 2824 *mce_idx = pfvf->promisc_mce_idx; 2825 } else { 2826 *mce_list = NULL; 2827 *mce_idx = 0; 2828 } 2829 } 2830 2831 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 2832 int type, bool add) 2833 { 2834 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 2835 struct npc_mcam *mcam = &rvu->hw->mcam; 2836 struct rvu_hwinfo *hw = rvu->hw; 2837 struct nix_mce_list *mce_list; 2838 int pf; 2839 2840 /* skip multicast pkt replication for AF's VFs & SDP links */ 2841 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 2842 return 0; 2843 2844 if (!hw->cap.nix_rx_multicast) 2845 return 0; 2846 2847 pf = rvu_get_pf(pcifunc); 2848 if (!is_pf_cgxmapped(rvu, pf)) 2849 return 0; 2850 2851 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2852 if (blkaddr < 0) 2853 return -EINVAL; 2854 2855 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2856 if (nixlf < 0) 2857 return -EINVAL; 2858 2859 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 2860 2861 mcam_index = npc_get_nixlf_mcam_index(mcam, 2862 pcifunc & ~RVU_PFVF_FUNC_MASK, 2863 nixlf, type); 2864 err = nix_update_mce_list(rvu, pcifunc, mce_list, 2865 mce_idx, mcam_index, add); 2866 return err; 2867 } 2868 2869 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2870 { 2871 struct nix_mcast *mcast = &nix_hw->mcast; 2872 int err, pf, numvfs, idx; 2873 struct rvu_pfvf *pfvf; 2874 u16 pcifunc; 2875 u64 cfg; 2876 2877 /* Skip PF0 (i.e AF) */ 2878 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2879 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2880 /* If PF is not enabled, nothing to do */ 2881 if (!((cfg >> 20) & 0x01)) 2882 continue; 2883 /* Get numVFs attached to this PF */ 2884 numvfs = (cfg >> 12) & 0xFF; 2885 2886 pfvf = &rvu->pf[pf]; 2887 2888 /* This NIX0/1 block mapped to PF ? 
*/ 2889 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2890 continue; 2891 2892 /* save start idx of broadcast mce list */ 2893 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2894 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2895 2896 /* save start idx of multicast mce list */ 2897 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2898 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 2899 2900 /* save the start idx of promisc mce list */ 2901 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2902 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 2903 2904 for (idx = 0; idx < (numvfs + 1); idx++) { 2905 /* idx-0 is for PF, followed by VFs */ 2906 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2907 pcifunc |= idx; 2908 /* Add dummy entries now, so that we don't have to check 2909 * for whether AQ_OP should be INIT/WRITE later on. 2910 * Will be updated when a NIXLF is attached/detached to 2911 * these PF/VFs. 2912 */ 2913 err = nix_blk_setup_mce(rvu, nix_hw, 2914 pfvf->bcast_mce_idx + idx, 2915 NIX_AQ_INSTOP_INIT, 2916 pcifunc, 0, true); 2917 if (err) 2918 return err; 2919 2920 /* add dummy entries to multicast mce list */ 2921 err = nix_blk_setup_mce(rvu, nix_hw, 2922 pfvf->mcast_mce_idx + idx, 2923 NIX_AQ_INSTOP_INIT, 2924 pcifunc, 0, true); 2925 if (err) 2926 return err; 2927 2928 /* add dummy entries to promisc mce list */ 2929 err = nix_blk_setup_mce(rvu, nix_hw, 2930 pfvf->promisc_mce_idx + idx, 2931 NIX_AQ_INSTOP_INIT, 2932 pcifunc, 0, true); 2933 if (err) 2934 return err; 2935 } 2936 } 2937 return 0; 2938 } 2939 2940 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2941 { 2942 struct nix_mcast *mcast = &nix_hw->mcast; 2943 struct rvu_hwinfo *hw = rvu->hw; 2944 int err, size; 2945 2946 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2947 size = (1ULL << size); 2948 2949 /* Alloc memory for multicast/mirror replication entries */ 2950 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2951 (256UL << MC_TBL_SIZE), size); 2952 if (err) 2953 return -ENOMEM; 2954 2955 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2956 (u64)mcast->mce_ctx->iova); 2957 2958 /* Set max list length equal to max no of VFs per PF + PF itself */ 2959 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2960 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2961 2962 /* Alloc memory for multicast replication buffers */ 2963 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2964 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2965 (8UL << MC_BUF_CNT), size); 2966 if (err) 2967 return -ENOMEM; 2968 2969 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2970 (u64)mcast->mcast_buf->iova); 2971 2972 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2973 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2974 2975 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2976 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2977 BIT_ULL(20) | MC_BUF_CNT); 2978 2979 mutex_init(&mcast->mce_lock); 2980 2981 return nix_setup_mce_tables(rvu, nix_hw); 2982 } 2983 2984 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 2985 { 2986 struct nix_txvlan *vlan = &nix_hw->txvlan; 2987 int err; 2988 2989 /* Allocate resource bimap for tx vtag def registers*/ 2990 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 2991 err = rvu_alloc_bitmap(&vlan->rsrc); 2992 if (err) 2993 return -ENOMEM; 2994 2995 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 2996 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, 
vlan->rsrc.max, 2997 sizeof(u16), GFP_KERNEL); 2998 if (!vlan->entry2pfvf_map) 2999 goto free_mem; 3000 3001 mutex_init(&vlan->rsrc_lock); 3002 return 0; 3003 3004 free_mem: 3005 kfree(vlan->rsrc.bmap); 3006 return -ENOMEM; 3007 } 3008 3009 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3010 { 3011 struct nix_txsch *txsch; 3012 int err, lvl, schq; 3013 u64 cfg, reg; 3014 3015 /* Get scheduler queue count of each type and alloc 3016 * bitmap for each for alloc/free/attach operations. 3017 */ 3018 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3019 txsch = &nix_hw->txsch[lvl]; 3020 txsch->lvl = lvl; 3021 switch (lvl) { 3022 case NIX_TXSCH_LVL_SMQ: 3023 reg = NIX_AF_MDQ_CONST; 3024 break; 3025 case NIX_TXSCH_LVL_TL4: 3026 reg = NIX_AF_TL4_CONST; 3027 break; 3028 case NIX_TXSCH_LVL_TL3: 3029 reg = NIX_AF_TL3_CONST; 3030 break; 3031 case NIX_TXSCH_LVL_TL2: 3032 reg = NIX_AF_TL2_CONST; 3033 break; 3034 case NIX_TXSCH_LVL_TL1: 3035 reg = NIX_AF_TL1_CONST; 3036 break; 3037 } 3038 cfg = rvu_read64(rvu, blkaddr, reg); 3039 txsch->schq.max = cfg & 0xFFFF; 3040 err = rvu_alloc_bitmap(&txsch->schq); 3041 if (err) 3042 return err; 3043 3044 /* Allocate memory for scheduler queues to 3045 * PF/VF pcifunc mapping info. 3046 */ 3047 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3048 sizeof(u32), GFP_KERNEL); 3049 if (!txsch->pfvf_map) 3050 return -ENOMEM; 3051 for (schq = 0; schq < txsch->schq.max; schq++) 3052 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3053 } 3054 3055 /* Setup a default value of 8192 as DWRR MTU */ 3056 if (rvu->hw->cap.nix_common_dwrr_mtu) { 3057 rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU, 3058 convert_bytes_to_dwrr_mtu(8192)); 3059 rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU, 3060 convert_bytes_to_dwrr_mtu(8192)); 3061 } 3062 3063 return 0; 3064 } 3065 3066 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3067 int blkaddr, u32 cfg) 3068 { 3069 int fmt_idx; 3070 3071 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3072 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3073 return fmt_idx; 3074 } 3075 if (fmt_idx >= nix_hw->mark_format.total) 3076 return -ERANGE; 3077 3078 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3079 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3080 nix_hw->mark_format.in_use++; 3081 return fmt_idx; 3082 } 3083 3084 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3085 int blkaddr) 3086 { 3087 u64 cfgs[] = { 3088 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3089 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3090 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3091 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3092 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3093 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3094 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3095 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3096 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3097 }; 3098 int i, rc; 3099 u64 total; 3100 3101 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3102 nix_hw->mark_format.total = (u8)total; 3103 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3104 GFP_KERNEL); 3105 if (!nix_hw->mark_format.cfg) 3106 return -ENOMEM; 3107 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3108 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3109 if (rc < 0) 3110 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3111 i, rc); 3112 } 3113 3114 return 0; 3115 } 3116 3117 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 
*max_mtu) 3118 { 3119 /* CN10K supports LBK FIFO size 72 KB */ 3120 if (rvu->hw->lbk_bufsize == 0x12000) 3121 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3122 else 3123 *max_mtu = NIC_HW_MAX_FRS; 3124 } 3125 3126 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3127 { 3128 /* RPM supports FIFO len 128 KB */ 3129 if (rvu_cgx_get_fifolen(rvu) == 0x20000) 3130 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3131 else 3132 *max_mtu = NIC_HW_MAX_FRS; 3133 } 3134 3135 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3136 struct nix_hw_info *rsp) 3137 { 3138 u16 pcifunc = req->hdr.pcifunc; 3139 u64 dwrr_mtu; 3140 int blkaddr; 3141 3142 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3143 if (blkaddr < 0) 3144 return NIX_AF_ERR_AF_LF_INVALID; 3145 3146 if (is_afvf(pcifunc)) 3147 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3148 else 3149 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3150 3151 rsp->min_mtu = NIC_HW_MIN_FRS; 3152 3153 if (!rvu->hw->cap.nix_common_dwrr_mtu) { 3154 /* Return '1' on OTx2 */ 3155 rsp->rpm_dwrr_mtu = 1; 3156 rsp->sdp_dwrr_mtu = 1; 3157 return 0; 3158 } 3159 3160 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU); 3161 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3162 3163 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU); 3164 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3165 3166 return 0; 3167 } 3168 3169 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3170 struct msg_rsp *rsp) 3171 { 3172 u16 pcifunc = req->hdr.pcifunc; 3173 int i, nixlf, blkaddr, err; 3174 u64 stats; 3175 3176 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3177 if (err) 3178 return err; 3179 3180 /* Get stats count supported by HW */ 3181 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3182 3183 /* Reset tx stats */ 3184 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3185 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3186 3187 /* Reset rx stats */ 3188 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3189 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3190 3191 return 0; 3192 } 3193 3194 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3195 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3196 { 3197 int i; 3198 3199 /* Scan over exiting algo entries to find a match */ 3200 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3201 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3202 return i; 3203 3204 return -ERANGE; 3205 } 3206 3207 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3208 { 3209 int idx, nr_field, key_off, field_marker, keyoff_marker; 3210 int max_key_off, max_bit_pos, group_member; 3211 struct nix_rx_flowkey_alg *field; 3212 struct nix_rx_flowkey_alg tmp; 3213 u32 key_type, valid_key; 3214 int l4_key_offset = 0; 3215 3216 if (!alg) 3217 return -EINVAL; 3218 3219 #define FIELDS_PER_ALG 5 3220 #define MAX_KEY_OFF 40 3221 /* Clear all fields */ 3222 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3223 3224 /* Each of the 32 possible flow key algorithm definitions should 3225 * fall into above incremental config (except ALG0). Otherwise a 3226 * single NPC MCAM entry is not sufficient for supporting RSS. 3227 * 3228 * If a different definition or combination needed then NPC MCAM 3229 * has to be programmed to filter such pkts and it's action should 3230 * point to this definition to calculate flowtag or hash. 
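 * As a concrete example, flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
 * NIX_FLOW_KEY_TYPE_TCP selects SIP + DIP (8 bytes) followed by
 * sport + dport (4 bytes), i.e. 12 of the 40 available key bytes.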
3231 * 3232 * The `for loop` goes over _all_ protocol field and the following 3233 * variables depicts the state machine forward progress logic. 3234 * 3235 * keyoff_marker - Enabled when hash byte length needs to be accounted 3236 * in field->key_offset update. 3237 * field_marker - Enabled when a new field needs to be selected. 3238 * group_member - Enabled when protocol is part of a group. 3239 */ 3240 3241 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3242 nr_field = 0; key_off = 0; field_marker = 1; 3243 field = &tmp; max_bit_pos = fls(flow_cfg); 3244 for (idx = 0; 3245 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3246 key_off < MAX_KEY_OFF; idx++) { 3247 key_type = BIT(idx); 3248 valid_key = flow_cfg & key_type; 3249 /* Found a field marker, reset the field values */ 3250 if (field_marker) 3251 memset(&tmp, 0, sizeof(tmp)); 3252 3253 field_marker = true; 3254 keyoff_marker = true; 3255 switch (key_type) { 3256 case NIX_FLOW_KEY_TYPE_PORT: 3257 field->sel_chan = true; 3258 /* This should be set to 1, when SEL_CHAN is set */ 3259 field->bytesm1 = 1; 3260 break; 3261 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3262 field->lid = NPC_LID_LC; 3263 field->hdr_offset = 9; /* offset */ 3264 field->bytesm1 = 0; /* 1 byte */ 3265 field->ltype_match = NPC_LT_LC_IP; 3266 field->ltype_mask = 0xF; 3267 break; 3268 case NIX_FLOW_KEY_TYPE_IPV4: 3269 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3270 field->lid = NPC_LID_LC; 3271 field->ltype_match = NPC_LT_LC_IP; 3272 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3273 field->lid = NPC_LID_LG; 3274 field->ltype_match = NPC_LT_LG_TU_IP; 3275 } 3276 field->hdr_offset = 12; /* SIP offset */ 3277 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3278 field->ltype_mask = 0xF; /* Match only IPv4 */ 3279 keyoff_marker = false; 3280 break; 3281 case NIX_FLOW_KEY_TYPE_IPV6: 3282 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3283 field->lid = NPC_LID_LC; 3284 field->ltype_match = NPC_LT_LC_IP6; 3285 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3286 field->lid = NPC_LID_LG; 3287 field->ltype_match = NPC_LT_LG_TU_IP6; 3288 } 3289 field->hdr_offset = 8; /* SIP offset */ 3290 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3291 field->ltype_mask = 0xF; /* Match only IPv6 */ 3292 break; 3293 case NIX_FLOW_KEY_TYPE_TCP: 3294 case NIX_FLOW_KEY_TYPE_UDP: 3295 case NIX_FLOW_KEY_TYPE_SCTP: 3296 case NIX_FLOW_KEY_TYPE_INNR_TCP: 3297 case NIX_FLOW_KEY_TYPE_INNR_UDP: 3298 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 3299 field->lid = NPC_LID_LD; 3300 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 3301 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 3302 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 3303 field->lid = NPC_LID_LH; 3304 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 3305 3306 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 3307 * so no need to change the ltype_match, just change 3308 * the lid for inner protocols 3309 */ 3310 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 3311 (int)NPC_LT_LH_TU_TCP); 3312 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 3313 (int)NPC_LT_LH_TU_UDP); 3314 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 3315 (int)NPC_LT_LH_TU_SCTP); 3316 3317 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 3318 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 3319 valid_key) { 3320 field->ltype_match |= NPC_LT_LD_TCP; 3321 group_member = true; 3322 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 3323 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 3324 valid_key) { 3325 field->ltype_match |= NPC_LT_LD_UDP; 3326 group_member = true; 3327 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 3328 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 
3329 valid_key) { 3330 field->ltype_match |= NPC_LT_LD_SCTP; 3331 group_member = true; 3332 } 3333 field->ltype_mask = ~field->ltype_match; 3334 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 3335 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 3336 /* Handle the case where any of the group item 3337 * is enabled in the group but not the final one 3338 */ 3339 if (group_member) { 3340 valid_key = true; 3341 group_member = false; 3342 } 3343 } else { 3344 field_marker = false; 3345 keyoff_marker = false; 3346 } 3347 3348 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 3349 * remember the TCP key offset of 40 byte hash key. 3350 */ 3351 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 3352 l4_key_offset = key_off; 3353 break; 3354 case NIX_FLOW_KEY_TYPE_NVGRE: 3355 field->lid = NPC_LID_LD; 3356 field->hdr_offset = 4; /* VSID offset */ 3357 field->bytesm1 = 2; 3358 field->ltype_match = NPC_LT_LD_NVGRE; 3359 field->ltype_mask = 0xF; 3360 break; 3361 case NIX_FLOW_KEY_TYPE_VXLAN: 3362 case NIX_FLOW_KEY_TYPE_GENEVE: 3363 field->lid = NPC_LID_LE; 3364 field->bytesm1 = 2; 3365 field->hdr_offset = 4; 3366 field->ltype_mask = 0xF; 3367 field_marker = false; 3368 keyoff_marker = false; 3369 3370 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 3371 field->ltype_match |= NPC_LT_LE_VXLAN; 3372 group_member = true; 3373 } 3374 3375 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 3376 field->ltype_match |= NPC_LT_LE_GENEVE; 3377 group_member = true; 3378 } 3379 3380 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 3381 if (group_member) { 3382 field->ltype_mask = ~field->ltype_match; 3383 field_marker = true; 3384 keyoff_marker = true; 3385 valid_key = true; 3386 group_member = false; 3387 } 3388 } 3389 break; 3390 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 3391 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 3392 field->lid = NPC_LID_LA; 3393 field->ltype_match = NPC_LT_LA_ETHER; 3394 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 3395 field->lid = NPC_LID_LF; 3396 field->ltype_match = NPC_LT_LF_TU_ETHER; 3397 } 3398 field->hdr_offset = 0; 3399 field->bytesm1 = 5; /* DMAC 6 Byte */ 3400 field->ltype_mask = 0xF; 3401 break; 3402 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 3403 field->lid = NPC_LID_LC; 3404 field->hdr_offset = 40; /* IPV6 hdr */ 3405 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 3406 field->ltype_match = NPC_LT_LC_IP6_EXT; 3407 field->ltype_mask = 0xF; 3408 break; 3409 case NIX_FLOW_KEY_TYPE_GTPU: 3410 field->lid = NPC_LID_LE; 3411 field->hdr_offset = 4; 3412 field->bytesm1 = 3; /* 4 bytes TID*/ 3413 field->ltype_match = NPC_LT_LE_GTPU; 3414 field->ltype_mask = 0xF; 3415 break; 3416 case NIX_FLOW_KEY_TYPE_VLAN: 3417 field->lid = NPC_LID_LB; 3418 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 3419 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3420 field->ltype_match = NPC_LT_LB_CTAG; 3421 field->ltype_mask = 0xF; 3422 field->fn_mask = 1; /* Mask out the first nibble */ 3423 break; 3424 case NIX_FLOW_KEY_TYPE_AH: 3425 case NIX_FLOW_KEY_TYPE_ESP: 3426 field->hdr_offset = 0; 3427 field->bytesm1 = 7; /* SPI + sequence number */ 3428 field->ltype_mask = 0xF; 3429 field->lid = NPC_LID_LE; 3430 field->ltype_match = NPC_LT_LE_ESP; 3431 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3432 field->lid = NPC_LID_LD; 3433 field->ltype_match = NPC_LT_LD_AH; 3434 field->hdr_offset = 4; 3435 keyoff_marker = false; 3436 } 3437 break; 3438 } 3439 field->ena = 1; 3440 3441 /* Found a valid flow key type */ 3442 if (valid_key) { 3443 /* Use the key offset of TCP/UDP/SCTP fields 3444 * for ESP/AH fields. 
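 * (both groups are laid over the same bytes of the 40 byte key, so
 * enabling ESP/AH does not consume extra key space)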
3445 */ 3446 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 3447 key_type == NIX_FLOW_KEY_TYPE_AH) 3448 key_off = l4_key_offset; 3449 field->key_offset = key_off; 3450 memcpy(&alg[nr_field], field, sizeof(*field)); 3451 max_key_off = max(max_key_off, field->bytesm1 + 1); 3452 3453 /* Found a field marker, get the next field */ 3454 if (field_marker) 3455 nr_field++; 3456 } 3457 3458 /* Found a keyoff marker, update the new key_off */ 3459 if (keyoff_marker) { 3460 key_off += max_key_off; 3461 max_key_off = 0; 3462 } 3463 } 3464 /* Processed all the flow key types */ 3465 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 3466 return 0; 3467 else 3468 return NIX_AF_ERR_RSS_NOSPC_FIELD; 3469 } 3470 3471 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 3472 { 3473 u64 field[FIELDS_PER_ALG]; 3474 struct nix_hw *hw; 3475 int fid, rc; 3476 3477 hw = get_nix_hw(rvu->hw, blkaddr); 3478 if (!hw) 3479 return NIX_AF_ERR_INVALID_NIXBLK; 3480 3481 /* No room to add new flow hash algoritham */ 3482 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 3483 return NIX_AF_ERR_RSS_NOSPC_ALGO; 3484 3485 /* Generate algo fields for the given flow_cfg */ 3486 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 3487 if (rc) 3488 return rc; 3489 3490 /* Update ALGX_FIELDX register with generated fields */ 3491 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3492 rvu_write64(rvu, blkaddr, 3493 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 3494 fid), field[fid]); 3495 3496 /* Store the flow_cfg for futher lookup */ 3497 rc = hw->flowkey.in_use; 3498 hw->flowkey.flowkey[rc] = flow_cfg; 3499 hw->flowkey.in_use++; 3500 3501 return rc; 3502 } 3503 3504 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 3505 struct nix_rss_flowkey_cfg *req, 3506 struct nix_rss_flowkey_cfg_rsp *rsp) 3507 { 3508 u16 pcifunc = req->hdr.pcifunc; 3509 int alg_idx, nixlf, blkaddr; 3510 struct nix_hw *nix_hw; 3511 int err; 3512 3513 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3514 if (err) 3515 return err; 3516 3517 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3518 if (!nix_hw) 3519 return NIX_AF_ERR_INVALID_NIXBLK; 3520 3521 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 3522 /* Failed to get algo index from the exiting list, reserve new */ 3523 if (alg_idx < 0) { 3524 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 3525 req->flowkey_cfg); 3526 if (alg_idx < 0) 3527 return alg_idx; 3528 } 3529 rsp->alg_idx = alg_idx; 3530 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 3531 alg_idx, req->mcam_index); 3532 return 0; 3533 } 3534 3535 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 3536 { 3537 u32 flowkey_cfg, minkey_cfg; 3538 int alg, fid, rc; 3539 3540 /* Disable all flow key algx fieldx */ 3541 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 3542 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3543 rvu_write64(rvu, blkaddr, 3544 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 3545 0); 3546 } 3547 3548 /* IPv4/IPv6 SIP/DIPs */ 3549 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 3550 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3551 if (rc < 0) 3552 return rc; 3553 3554 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3555 minkey_cfg = flowkey_cfg; 3556 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 3557 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3558 if (rc < 0) 3559 return rc; 3560 3561 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3562 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 3563 rc = 
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3564 if (rc < 0) 3565 return rc; 3566 3567 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3568 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3569 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3570 if (rc < 0) 3571 return rc; 3572 3573 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3574 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3575 NIX_FLOW_KEY_TYPE_UDP; 3576 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3577 if (rc < 0) 3578 return rc; 3579 3580 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3581 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3582 NIX_FLOW_KEY_TYPE_SCTP; 3583 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3584 if (rc < 0) 3585 return rc; 3586 3587 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3588 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3589 NIX_FLOW_KEY_TYPE_SCTP; 3590 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3591 if (rc < 0) 3592 return rc; 3593 3594 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3595 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3596 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3597 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3598 if (rc < 0) 3599 return rc; 3600 3601 return 0; 3602 } 3603 3604 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3605 struct nix_set_mac_addr *req, 3606 struct msg_rsp *rsp) 3607 { 3608 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3609 u16 pcifunc = req->hdr.pcifunc; 3610 int blkaddr, nixlf, err; 3611 struct rvu_pfvf *pfvf; 3612 3613 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3614 if (err) 3615 return err; 3616 3617 pfvf = rvu_get_pfvf(rvu, pcifunc); 3618 3619 /* untrusted VF can't overwrite admin(PF) changes */ 3620 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3621 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3622 dev_warn(rvu->dev, 3623 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3624 return -EPERM; 3625 } 3626 3627 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3628 3629 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3630 pfvf->rx_chan_base, req->mac_addr); 3631 3632 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 3633 ether_addr_copy(pfvf->default_mac, req->mac_addr); 3634 3635 rvu_switch_update_rules(rvu, pcifunc); 3636 3637 return 0; 3638 } 3639 3640 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 3641 struct msg_req *req, 3642 struct nix_get_mac_addr_rsp *rsp) 3643 { 3644 u16 pcifunc = req->hdr.pcifunc; 3645 struct rvu_pfvf *pfvf; 3646 3647 if (!is_nixlf_attached(rvu, pcifunc)) 3648 return NIX_AF_ERR_AF_LF_INVALID; 3649 3650 pfvf = rvu_get_pfvf(rvu, pcifunc); 3651 3652 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 3653 3654 return 0; 3655 } 3656 3657 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 3658 struct msg_rsp *rsp) 3659 { 3660 bool allmulti, promisc, nix_rx_multicast; 3661 u16 pcifunc = req->hdr.pcifunc; 3662 struct rvu_pfvf *pfvf; 3663 int nixlf, err; 3664 3665 pfvf = rvu_get_pfvf(rvu, pcifunc); 3666 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 3667 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 3668 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 3669 3670 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 3671 3672 if (is_vf(pcifunc) && !nix_rx_multicast && 3673 (promisc || allmulti)) { 3674 dev_warn_ratelimited(rvu->dev, 3675 "VF promisc/multicast not supported\n"); 3676 return 0; 3677 } 3678 3679 /* untrusted VF can't configure promisc/allmulti */ 3680 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3681 (promisc || allmulti)) 3682 return 0; 3683 3684 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3685 if (err) 3686 return err; 3687 3688 if (nix_rx_multicast) { 3689 /* add/del this PF_FUNC to/from mcast pkt replication list */ 3690 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 3691 allmulti); 3692 if (err) { 3693 dev_err(rvu->dev, 3694 "Failed to update pcifunc 0x%x to multicast list\n", 3695 pcifunc); 3696 return err; 3697 } 3698 3699 /* add/del this PF_FUNC to/from promisc pkt replication list */ 3700 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 3701 promisc); 3702 if (err) { 3703 dev_err(rvu->dev, 3704 "Failed to update pcifunc 0x%x to promisc list\n", 3705 pcifunc); 3706 return err; 3707 } 3708 } 3709 3710 /* install/uninstall allmulti entry */ 3711 if (allmulti) { 3712 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 3713 pfvf->rx_chan_base); 3714 } else { 3715 if (!nix_rx_multicast) 3716 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 3717 } 3718 3719 /* install/uninstall promisc entry */ 3720 if (promisc) { 3721 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3722 pfvf->rx_chan_base, 3723 pfvf->rx_chan_cnt); 3724 } else { 3725 if (!nix_rx_multicast) 3726 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3727 } 3728 3729 return 0; 3730 } 3731 3732 static void nix_find_link_frs(struct rvu *rvu, 3733 struct nix_frs_cfg *req, u16 pcifunc) 3734 { 3735 int pf = rvu_get_pf(pcifunc); 3736 struct rvu_pfvf *pfvf; 3737 int maxlen, minlen; 3738 int numvfs, hwvf; 3739 int vf; 3740 3741 /* Update with requester's min/max lengths */ 3742 pfvf = rvu_get_pfvf(rvu, pcifunc); 3743 pfvf->maxlen = req->maxlen; 3744 if (req->update_minlen) 3745 pfvf->minlen = req->minlen; 3746 3747 maxlen = req->maxlen; 3748 minlen = req->update_minlen ? 
req->minlen : 0; 3749 3750 /* Get this PF's numVFs and starting hwvf */ 3751 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 3752 3753 /* For each VF, compare requested max/minlen */ 3754 for (vf = 0; vf < numvfs; vf++) { 3755 pfvf = &rvu->hwvf[hwvf + vf]; 3756 if (pfvf->maxlen > maxlen) 3757 maxlen = pfvf->maxlen; 3758 if (req->update_minlen && 3759 pfvf->minlen && pfvf->minlen < minlen) 3760 minlen = pfvf->minlen; 3761 } 3762 3763 /* Compare requested max/minlen with PF's max/minlen */ 3764 pfvf = &rvu->pf[pf]; 3765 if (pfvf->maxlen > maxlen) 3766 maxlen = pfvf->maxlen; 3767 if (req->update_minlen && 3768 pfvf->minlen && pfvf->minlen < minlen) 3769 minlen = pfvf->minlen; 3770 3771 /* Update the request with max/min PF's and it's VF's max/min */ 3772 req->maxlen = maxlen; 3773 if (req->update_minlen) 3774 req->minlen = minlen; 3775 } 3776 3777 static int 3778 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, 3779 u16 pcifunc, u64 tx_credits) 3780 { 3781 struct rvu_hwinfo *hw = rvu->hw; 3782 int pf = rvu_get_pf(pcifunc); 3783 u8 cgx_id = 0, lmac_id = 0; 3784 unsigned long poll_tmo; 3785 bool restore_tx_en = 0; 3786 struct nix_hw *nix_hw; 3787 u64 cfg, sw_xoff = 0; 3788 u32 schq = 0; 3789 u32 credits; 3790 int rc; 3791 3792 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3793 if (!nix_hw) 3794 return NIX_AF_ERR_INVALID_NIXBLK; 3795 3796 if (tx_credits == nix_hw->tx_credits[link]) 3797 return 0; 3798 3799 /* Enable cgx tx if disabled for credits to be back */ 3800 if (is_pf_cgxmapped(rvu, pf)) { 3801 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 3802 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 3803 lmac_id, true); 3804 } 3805 3806 mutex_lock(&rvu->rsrc_lock); 3807 /* Disable new traffic to link */ 3808 if (hw->cap.nix_shaping) { 3809 schq = nix_get_tx_link(rvu, pcifunc); 3810 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); 3811 rvu_write64(rvu, blkaddr, 3812 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); 3813 } 3814 3815 rc = -EBUSY; 3816 poll_tmo = jiffies + usecs_to_jiffies(10000); 3817 /* Wait for credits to return */ 3818 do { 3819 if (time_after(jiffies, poll_tmo)) 3820 goto exit; 3821 usleep_range(100, 200); 3822 3823 cfg = rvu_read64(rvu, blkaddr, 3824 NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3825 credits = (cfg >> 12) & 0xFFFFFULL; 3826 } while (credits != nix_hw->tx_credits[link]); 3827 3828 cfg &= ~(0xFFFFFULL << 12); 3829 cfg |= (tx_credits << 12); 3830 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 3831 rc = 0; 3832 3833 nix_hw->tx_credits[link] = tx_credits; 3834 3835 exit: 3836 /* Enable traffic back */ 3837 if (hw->cap.nix_shaping && !sw_xoff) 3838 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); 3839 3840 /* Restore state of cgx tx */ 3841 if (restore_tx_en) 3842 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 3843 3844 mutex_unlock(&rvu->rsrc_lock); 3845 return rc; 3846 } 3847 3848 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 3849 struct msg_rsp *rsp) 3850 { 3851 struct rvu_hwinfo *hw = rvu->hw; 3852 u16 pcifunc = req->hdr.pcifunc; 3853 int pf = rvu_get_pf(pcifunc); 3854 int blkaddr, schq, link = -1; 3855 struct nix_txsch *txsch; 3856 u64 cfg, lmac_fifo_len; 3857 struct nix_hw *nix_hw; 3858 struct rvu_pfvf *pfvf; 3859 u8 cgx = 0, lmac = 0; 3860 u16 max_mtu; 3861 3862 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3863 if (blkaddr < 0) 3864 return NIX_AF_ERR_AF_LF_INVALID; 3865 3866 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3867 if (!nix_hw) 3868 return 
NIX_AF_ERR_INVALID_NIXBLK; 3869 3870 if (is_afvf(pcifunc)) 3871 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 3872 else 3873 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 3874 3875 if (!req->sdp_link && req->maxlen > max_mtu) 3876 return NIX_AF_ERR_FRS_INVALID; 3877 3878 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 3879 return NIX_AF_ERR_FRS_INVALID; 3880 3881 /* Check if requester wants to update SMQ's */ 3882 if (!req->update_smq) 3883 goto rx_frscfg; 3884 3885 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 3886 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 3887 mutex_lock(&rvu->rsrc_lock); 3888 for (schq = 0; schq < txsch->schq.max; schq++) { 3889 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 3890 continue; 3891 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 3892 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 3893 if (req->update_minlen) 3894 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 3895 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 3896 } 3897 mutex_unlock(&rvu->rsrc_lock); 3898 3899 rx_frscfg: 3900 /* Check if config is for SDP link */ 3901 if (req->sdp_link) { 3902 if (!hw->sdp_links) 3903 return NIX_AF_ERR_RX_LINK_INVALID; 3904 link = hw->cgx_links + hw->lbk_links; 3905 goto linkcfg; 3906 } 3907 3908 /* Check if the request is from CGX mapped RVU PF */ 3909 if (is_pf_cgxmapped(rvu, pf)) { 3910 /* Get CGX and LMAC to which this PF is mapped and find link */ 3911 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 3912 link = (cgx * hw->lmac_per_cgx) + lmac; 3913 } else if (pf == 0) { 3914 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 3915 pfvf = rvu_get_pfvf(rvu, pcifunc); 3916 link = hw->cgx_links + pfvf->lbkid; 3917 } 3918 3919 if (link < 0) 3920 return NIX_AF_ERR_RX_LINK_INVALID; 3921 3922 nix_find_link_frs(rvu, req, pcifunc); 3923 3924 linkcfg: 3925 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 3926 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 3927 if (req->update_minlen) 3928 cfg = (cfg & ~0xFFFFULL) | req->minlen; 3929 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 3930 3931 if (req->sdp_link || pf == 0) 3932 return 0; 3933 3934 /* Update transmit credits for CGX links */ 3935 lmac_fifo_len = 3936 rvu_cgx_get_fifolen(rvu) / 3937 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3938 return nix_config_link_credits(rvu, blkaddr, link, pcifunc, 3939 (lmac_fifo_len - req->maxlen) / 16); 3940 } 3941 3942 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3943 struct msg_rsp *rsp) 3944 { 3945 int nixlf, blkaddr, err; 3946 u64 cfg; 3947 3948 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3949 if (err) 3950 return err; 3951 3952 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3953 /* Set the interface configuration */ 3954 if (req->len_verify & BIT(0)) 3955 cfg |= BIT_ULL(41); 3956 else 3957 cfg &= ~BIT_ULL(41); 3958 3959 if (req->len_verify & BIT(1)) 3960 cfg |= BIT_ULL(40); 3961 else 3962 cfg &= ~BIT_ULL(40); 3963 3964 if (req->csum_verify & BIT(0)) 3965 cfg |= BIT_ULL(37); 3966 else 3967 cfg &= ~BIT_ULL(37); 3968 3969 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3970 3971 return 0; 3972 } 3973 3974 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 3975 { 3976 /* CN10k supports 72KB FIFO size and max packet size of 64k */ 3977 if (rvu->hw->lbk_bufsize == 0x12000) 3978 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; 3979 3980 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 
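	/* Note: link credits appear to be in units of 16 bytes (hence the
	 * divide by 16 above), so the 1600-credit fallback corresponds to
	 * 25600 bytes in flight.
	 */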
3981 } 3982 3983 static void nix_link_config(struct rvu *rvu, int blkaddr, 3984 struct nix_hw *nix_hw) 3985 { 3986 struct rvu_hwinfo *hw = rvu->hw; 3987 int cgx, lmac_cnt, slink, link; 3988 u16 lbk_max_frs, lmac_max_frs; 3989 u64 tx_credits, cfg; 3990 3991 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 3992 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 3993 3994 /* Set default min/max packet lengths allowed on NIX Rx links. 3995 * 3996 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3997 * as undersize and report them to SW as error pkts, hence 3998 * setting it to 40 bytes. 3999 */ 4000 for (link = 0; link < hw->cgx_links; link++) { 4001 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4002 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4003 } 4004 4005 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4006 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4007 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4008 } 4009 if (hw->sdp_links) { 4010 link = hw->cgx_links + hw->lbk_links; 4011 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4012 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 4013 } 4014 4015 /* Set credits for Tx links assuming max packet length allowed. 4016 * This will be reconfigured based on MTU set for PF/VF. 4017 */ 4018 for (cgx = 0; cgx < hw->cgx; cgx++) { 4019 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4020 /* Skip when cgx is not available or lmac cnt is zero */ 4021 if (lmac_cnt <= 0) 4022 continue; 4023 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - 4024 lmac_max_frs) / 16; 4025 /* Enable credits and set credit pkt count to max allowed */ 4026 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4027 slink = cgx * hw->lmac_per_cgx; 4028 for (link = slink; link < (slink + lmac_cnt); link++) { 4029 nix_hw->tx_credits[link] = tx_credits; 4030 rvu_write64(rvu, blkaddr, 4031 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4032 } 4033 } 4034 4035 /* Set Tx credits for LBK link */ 4036 slink = hw->cgx_links; 4037 for (link = slink; link < (slink + hw->lbk_links); link++) { 4038 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4039 nix_hw->tx_credits[link] = tx_credits; 4040 /* Enable credits and set credit pkt count to max allowed */ 4041 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4042 rvu_write64(rvu, blkaddr, 4043 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4044 } 4045 } 4046 4047 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4048 { 4049 int idx, err; 4050 u64 status; 4051 4052 /* Start X2P bus calibration */ 4053 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4054 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4055 /* Wait for calibration to complete */ 4056 err = rvu_poll_reg(rvu, blkaddr, 4057 NIX_AF_STATUS, BIT_ULL(10), false); 4058 if (err) { 4059 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4060 return err; 4061 } 4062 4063 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4064 /* Check if CGX devices are ready */ 4065 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4066 /* Skip when cgx port is not available */ 4067 if (!rvu_cgx_pdata(idx, rvu) || 4068 (status & (BIT_ULL(16 + idx)))) 4069 continue; 4070 dev_err(rvu->dev, 4071 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4072 err = -EBUSY; 4073 } 4074 4075 /* Check if LBK is ready */ 4076 if (!(status & BIT_ULL(19))) { 4077 dev_err(rvu->dev, 4078 "LBK didn't respond to NIX X2P calibration\n"); 4079 err = -EBUSY; 4080 } 4081 4082 /* Clear 'calibrate_x2p' bit */ 4083 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4084 rvu_read64(rvu, 
blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4085 if (err || (status & 0x3FFULL)) 4086 dev_err(rvu->dev, 4087 "NIX X2P calibration failed, status 0x%llx\n", status); 4088 if (err) 4089 return err; 4090 return 0; 4091 } 4092 4093 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4094 { 4095 u64 cfg; 4096 int err; 4097 4098 /* Set admin queue endianness */ 4099 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4100 #ifdef __BIG_ENDIAN 4101 cfg |= BIT_ULL(8); 4102 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4103 #else 4104 cfg &= ~BIT_ULL(8); 4105 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4106 #endif 4107 4108 /* Do not bypass NDC cache */ 4109 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4110 cfg &= ~0x3FFEULL; 4111 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4112 /* Disable caching of SQB aka SQEs */ 4113 cfg |= 0x04ULL; 4114 #endif 4115 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4116 4117 /* The result structure can be followed by RQ/SQ/CQ context at 4118 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on 4119 * the operation type. Allocate sufficient result memory for all operations. 4120 */ 4121 err = rvu_aq_alloc(rvu, &block->aq, 4122 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4123 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4124 if (err) 4125 return err; 4126 4127 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4128 rvu_write64(rvu, block->addr, 4129 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4130 return 0; 4131 } 4132 4133 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4134 { 4135 struct rvu_hwinfo *hw = rvu->hw; 4136 u64 hw_const; 4137 4138 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4139 4140 /* On OcteonTx2 the DWRR quantum is directly configured into each of 4141 * the transmit scheduler queues, and PF/VF drivers were free to 4142 * configure any value up to 2^24. 4143 * On CN10K the HW is modified: the quantum configuration at scheduler 4144 * queues is in terms of weight, and SW needs to set up a base DWRR MTU 4145 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW then computes 4146 * 'DWRR MTU * weight' to get the quantum. 4147 * 4148 * Check if HW uses a common MTU for all DWRR quantum configs. 4149 * On OcteonTx2 this register field is '0'. 4150 */ 4151 if (((hw_const >> 56) & 0x10) == 0x10) 4152 hw->cap.nix_common_dwrr_mtu = true; 4153 } 4154 4155 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4156 { 4157 const struct npc_lt_def_cfg *ltdefs; 4158 struct rvu_hwinfo *hw = rvu->hw; 4159 int blkaddr = nix_hw->blkaddr; 4160 struct rvu_block *block; 4161 int err; 4162 u64 cfg; 4163 4164 block = &hw->block[blkaddr]; 4165 4166 if (is_rvu_96xx_B0(rvu)) { 4167 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 4168 * internal state when conditional clocks are turned off. 4169 * Hence enable them. 4170 */ 4171 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4172 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4173 4174 /* Set chan/link to backpressure TL3 instead of TL2 */ 4175 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4176 4177 /* Disable the SQ manager's sticky mode operation (set TM6 = 0). 4178 * This sticky mode is known to cause SQ stalls when multiple 4179 * SQs are mapped to the same SMQ and are transmitting packets at the same time.
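		 * The write just below clears bit 15 (TM6) of NIX_AF_SQM_DBG_CTL_STATUS to turn this mode off.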
4180 */ 4181 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4182 cfg &= ~BIT_ULL(15); 4183 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4184 } 4185 4186 ltdefs = rvu->kpu.lt_def; 4187 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4188 err = nix_calibrate_x2p(rvu, blkaddr); 4189 if (err) 4190 return err; 4191 4192 /* Setup capabilities of the NIX block */ 4193 rvu_nix_setup_capabilities(rvu, blkaddr); 4194 4195 /* Initialize admin queue */ 4196 err = nix_aq_init(rvu, block); 4197 if (err) 4198 return err; 4199 4200 /* Restore CINT timer delay to HW reset values */ 4201 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4202 4203 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4204 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL); 4205 4206 if (is_block_implemented(hw, blkaddr)) { 4207 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4208 if (err) 4209 return err; 4210 4211 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4212 if (err) 4213 return err; 4214 4215 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4216 if (err) 4217 return err; 4218 4219 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4220 if (err) 4221 return err; 4222 4223 err = nix_setup_txvlan(rvu, nix_hw); 4224 if (err) 4225 return err; 4226 4227 /* Configure segmentation offload formats */ 4228 nix_setup_lso(rvu, nix_hw, blkaddr); 4229 4230 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4231 * This helps HW protocol checker to identify headers 4232 * and validate length and checksums. 4233 */ 4234 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4235 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4236 ltdefs->rx_ol2.ltype_mask); 4237 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4238 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4239 ltdefs->rx_oip4.ltype_mask); 4240 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4241 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4242 ltdefs->rx_iip4.ltype_mask); 4243 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4244 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4245 ltdefs->rx_oip6.ltype_mask); 4246 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4247 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4248 ltdefs->rx_iip6.ltype_mask); 4249 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4250 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4251 ltdefs->rx_otcp.ltype_mask); 4252 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4253 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4254 ltdefs->rx_itcp.ltype_mask); 4255 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4256 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4257 ltdefs->rx_oudp.ltype_mask); 4258 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4259 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4260 ltdefs->rx_iudp.ltype_mask); 4261 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4262 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4263 ltdefs->rx_osctp.ltype_mask); 4264 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4265 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4266 ltdefs->rx_isctp.ltype_mask); 4267 4268 if (!is_rvu_otx2(rvu)) { 4269 /* Enable APAD calculation for other protocols 4270 * matching APAD0 and APAD1 lt def registers. 
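		 * As with the OL2/OIP4/... defaults above, each write below packs the lt_def's valid bit, lid, ltype_match and ltype_mask into the register.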
4271 */ 4272 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 4273 (ltdefs->rx_apad0.valid << 11) | 4274 (ltdefs->rx_apad0.lid << 8) | 4275 (ltdefs->rx_apad0.ltype_match << 4) | 4276 ltdefs->rx_apad0.ltype_mask); 4277 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 4278 (ltdefs->rx_apad1.valid << 11) | 4279 (ltdefs->rx_apad1.lid << 8) | 4280 (ltdefs->rx_apad1.ltype_match << 4) | 4281 ltdefs->rx_apad1.ltype_mask); 4282 4283 /* The receive ethertype definition register defines layer 4284 * information in NPC_RESULT_S to identify the Ethertype 4285 * location in the L2 header. Used for Ethertype overwriting 4286 * in the inline IPsec flow. 4287 */ 4288 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 4289 (ltdefs->rx_et[0].offset << 12) | 4290 (ltdefs->rx_et[0].valid << 11) | 4291 (ltdefs->rx_et[0].lid << 8) | 4292 (ltdefs->rx_et[0].ltype_match << 4) | 4293 ltdefs->rx_et[0].ltype_mask); 4294 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 4295 (ltdefs->rx_et[1].offset << 12) | 4296 (ltdefs->rx_et[1].valid << 11) | 4297 (ltdefs->rx_et[1].lid << 8) | 4298 (ltdefs->rx_et[1].ltype_match << 4) | 4299 ltdefs->rx_et[1].ltype_mask); 4300 } 4301 4302 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 4303 if (err) 4304 return err; 4305 4306 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 4307 sizeof(u64), GFP_KERNEL); 4308 if (!nix_hw->tx_credits) 4309 return -ENOMEM; 4310 4311 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 4312 nix_link_config(rvu, blkaddr, nix_hw); 4313 4314 /* Enable Channel backpressure */ 4315 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 4316 } 4317 return 0; 4318 } 4319 4320 int rvu_nix_init(struct rvu *rvu) 4321 { 4322 struct rvu_hwinfo *hw = rvu->hw; 4323 struct nix_hw *nix_hw; 4324 int blkaddr = 0, err; 4325 int i = 0; 4326 4327 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 4328 GFP_KERNEL); 4329 if (!hw->nix) 4330 return -ENOMEM; 4331 4332 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4333 while (blkaddr) { 4334 nix_hw = &hw->nix[i]; 4335 nix_hw->rvu = rvu; 4336 nix_hw->blkaddr = blkaddr; 4337 err = rvu_nix_block_init(rvu, nix_hw); 4338 if (err) 4339 return err; 4340 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4341 i++; 4342 } 4343 4344 return 0; 4345 } 4346 4347 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 4348 struct rvu_block *block) 4349 { 4350 struct nix_txsch *txsch; 4351 struct nix_mcast *mcast; 4352 struct nix_txvlan *vlan; 4353 struct nix_hw *nix_hw; 4354 int lvl; 4355 4356 rvu_aq_free(rvu, block->aq); 4357 4358 if (is_block_implemented(rvu->hw, blkaddr)) { 4359 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4360 if (!nix_hw) 4361 return; 4362 4363 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 4364 txsch = &nix_hw->txsch[lvl]; 4365 kfree(txsch->schq.bmap); 4366 } 4367 4368 kfree(nix_hw->tx_credits); 4369 4370 nix_ipolicer_freemem(rvu, nix_hw); 4371 4372 vlan = &nix_hw->txvlan; 4373 kfree(vlan->rsrc.bmap); 4374 mutex_destroy(&vlan->rsrc_lock); 4375 4376 mcast = &nix_hw->mcast; 4377 qmem_free(rvu->dev, mcast->mce_ctx); 4378 qmem_free(rvu->dev, mcast->mcast_buf); 4379 mutex_destroy(&mcast->mce_lock); 4380 } 4381 } 4382 4383 void rvu_nix_freemem(struct rvu *rvu) 4384 { 4385 struct rvu_hwinfo *hw = rvu->hw; 4386 struct rvu_block *block; 4387 int blkaddr = 0; 4388 4389 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4390 while (blkaddr) { 4391 block = &hw->block[blkaddr]; 4392 rvu_nix_block_freemem(rvu, blkaddr, block); 4393 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4394 } 4395 } 4396 4397 int
rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 4398 struct msg_rsp *rsp) 4399 { 4400 u16 pcifunc = req->hdr.pcifunc; 4401 struct rvu_pfvf *pfvf; 4402 int nixlf, err; 4403 4404 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4405 if (err) 4406 return err; 4407 4408 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 4409 4410 npc_mcam_enable_flows(rvu, pcifunc); 4411 4412 pfvf = rvu_get_pfvf(rvu, pcifunc); 4413 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 4414 4415 rvu_switch_update_rules(rvu, pcifunc); 4416 4417 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 4418 } 4419 4420 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 4421 struct msg_rsp *rsp) 4422 { 4423 u16 pcifunc = req->hdr.pcifunc; 4424 struct rvu_pfvf *pfvf; 4425 int nixlf, err; 4426 4427 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4428 if (err) 4429 return err; 4430 4431 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4432 4433 pfvf = rvu_get_pfvf(rvu, pcifunc); 4434 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4435 4436 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 4437 } 4438 4439 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 4440 { 4441 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 4442 struct hwctx_disable_req ctx_req; 4443 int err; 4444 4445 ctx_req.hdr.pcifunc = pcifunc; 4446 4447 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 4448 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4449 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 4450 nix_interface_deinit(rvu, pcifunc, nixlf); 4451 nix_rx_sync(rvu, blkaddr); 4452 nix_txschq_free(rvu, pcifunc); 4453 4454 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4455 4456 rvu_cgx_start_stop_io(rvu, pcifunc, false); 4457 4458 if (pfvf->sq_ctx) { 4459 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 4460 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4461 if (err) 4462 dev_err(rvu->dev, "SQ ctx disable failed\n"); 4463 } 4464 4465 if (pfvf->rq_ctx) { 4466 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 4467 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4468 if (err) 4469 dev_err(rvu->dev, "RQ ctx disable failed\n"); 4470 } 4471 4472 if (pfvf->cq_ctx) { 4473 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 4474 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4475 if (err) 4476 dev_err(rvu->dev, "CQ ctx disable failed\n"); 4477 } 4478 4479 nix_ctx_free(rvu, pfvf); 4480 4481 nix_free_all_bandprof(rvu, pcifunc); 4482 } 4483 4484 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 4485 4486 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 4487 { 4488 struct rvu_hwinfo *hw = rvu->hw; 4489 struct rvu_block *block; 4490 int blkaddr, pf; 4491 int nixlf; 4492 u64 cfg; 4493 4494 pf = rvu_get_pf(pcifunc); 4495 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 4496 return 0; 4497 4498 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4499 if (blkaddr < 0) 4500 return NIX_AF_ERR_AF_LF_INVALID; 4501 4502 block = &hw->block[blkaddr]; 4503 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 4504 if (nixlf < 0) 4505 return NIX_AF_ERR_AF_LF_INVALID; 4506 4507 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 4508 4509 if (enable) 4510 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 4511 else 4512 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 4513 4514 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 4515 4516 return 0; 4517 } 4518 4519 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 4520 struct msg_rsp *rsp) 4521 { 4522 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 4523 } 4524 4525 
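/* The disable handler below mirrors the enable handler above; both funnel
 * into rvu_nix_lf_ptp_tx_cfg(), which simply toggles the PTP_EN bit in
 * NIX_AF_LFX_TX_CFG for the caller's NIX LF.
 */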
int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 4526 struct msg_rsp *rsp) 4527 { 4528 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4529 } 4530 4531 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4532 struct nix_lso_format_cfg *req, 4533 struct nix_lso_format_cfg_rsp *rsp) 4534 { 4535 u16 pcifunc = req->hdr.pcifunc; 4536 struct nix_hw *nix_hw; 4537 struct rvu_pfvf *pfvf; 4538 int blkaddr, idx, f; 4539 u64 reg; 4540 4541 pfvf = rvu_get_pfvf(rvu, pcifunc); 4542 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4543 if (!pfvf->nixlf || blkaddr < 0) 4544 return NIX_AF_ERR_AF_LF_INVALID; 4545 4546 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4547 if (!nix_hw) 4548 return NIX_AF_ERR_INVALID_NIXBLK; 4549 4550 /* Find existing matching LSO format, if any */ 4551 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4552 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4553 reg = rvu_read64(rvu, blkaddr, 4554 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 4555 if (req->fields[f] != (reg & req->field_mask)) 4556 break; 4557 } 4558 4559 if (f == NIX_LSO_FIELD_MAX) 4560 break; 4561 } 4562 4563 if (idx < nix_hw->lso.in_use) { 4564 /* Match found */ 4565 rsp->lso_format_idx = idx; 4566 return 0; 4567 } 4568 4569 if (nix_hw->lso.in_use == nix_hw->lso.total) 4570 return NIX_AF_ERR_LSO_CFG_FAIL; 4571 4572 rsp->lso_format_idx = nix_hw->lso.in_use++; 4573 4574 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 4575 rvu_write64(rvu, blkaddr, 4576 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 4577 req->fields[f]); 4578 4579 return 0; 4580 } 4581 4582 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 4583 { 4584 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 4585 4586 /* overwrite vf mac address with default_mac */ 4587 if (from_vf) 4588 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 4589 } 4590 4591 /* NIX ingress policers or bandwidth profiles APIs */ 4592 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 4593 { 4594 struct npc_lt_def_cfg defs, *ltdefs; 4595 4596 ltdefs = &defs; 4597 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 4598 4599 /* Extract PCP and DEI fields from outer VLAN from byte offset 4600 * 2 from the start of LB_PTR (ie TAG). 4601 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 4602 * fields are considered when 'Tunnel enable' is set in profile. 
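	 * VLAN0/VLAN1 below use the outer and inner VLAN lt_defs, while the
	 * DSCP registers reuse the same OIP4/IIP4/OIP6/IIP6 lt_defs that were
	 * programmed for the protocol checker at block init time.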
4603 */ 4604 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 4605 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 4606 (ltdefs->ovlan.ltype_match << 4) | 4607 ltdefs->ovlan.ltype_mask); 4608 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 4609 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 4610 (ltdefs->ivlan.ltype_match << 4) | 4611 ltdefs->ivlan.ltype_mask); 4612 4613 /* DSCP field in outer and tunneled IPv4 packets */ 4614 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 4615 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 4616 (ltdefs->rx_oip4.ltype_match << 4) | 4617 ltdefs->rx_oip4.ltype_mask); 4618 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 4619 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 4620 (ltdefs->rx_iip4.ltype_match << 4) | 4621 ltdefs->rx_iip4.ltype_mask); 4622 4623 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 4624 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 4625 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 4626 (ltdefs->rx_oip6.ltype_match << 4) | 4627 ltdefs->rx_oip6.ltype_mask); 4628 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 4629 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 4630 (ltdefs->rx_iip6.ltype_match << 4) | 4631 ltdefs->rx_iip6.ltype_mask); 4632 } 4633 4634 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 4635 int layer, int prof_idx) 4636 { 4637 struct nix_cn10k_aq_enq_req aq_req; 4638 int rc; 4639 4640 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4641 4642 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 4643 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 4644 aq_req.op = NIX_AQ_INSTOP_INIT; 4645 4646 /* Context is all zeros, submit to AQ */ 4647 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4648 (struct nix_aq_enq_req *)&aq_req, NULL); 4649 if (rc) 4650 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 4651 layer, prof_idx); 4652 return rc; 4653 } 4654 4655 static int nix_setup_ipolicers(struct rvu *rvu, 4656 struct nix_hw *nix_hw, int blkaddr) 4657 { 4658 struct rvu_hwinfo *hw = rvu->hw; 4659 struct nix_ipolicer *ipolicer; 4660 int err, layer, prof_idx; 4661 u64 cfg; 4662 4663 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 4664 if (!(cfg & BIT_ULL(61))) { 4665 hw->cap.ipolicer = false; 4666 return 0; 4667 } 4668 4669 hw->cap.ipolicer = true; 4670 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 4671 sizeof(*ipolicer), GFP_KERNEL); 4672 if (!nix_hw->ipolicer) 4673 return -ENOMEM; 4674 4675 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 4676 4677 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4678 ipolicer = &nix_hw->ipolicer[layer]; 4679 switch (layer) { 4680 case BAND_PROF_LEAF_LAYER: 4681 ipolicer->band_prof.max = cfg & 0XFFFF; 4682 break; 4683 case BAND_PROF_MID_LAYER: 4684 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 4685 break; 4686 case BAND_PROF_TOP_LAYER: 4687 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 4688 break; 4689 } 4690 4691 if (!ipolicer->band_prof.max) 4692 continue; 4693 4694 err = rvu_alloc_bitmap(&ipolicer->band_prof); 4695 if (err) 4696 return err; 4697 4698 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 4699 ipolicer->band_prof.max, 4700 sizeof(u16), GFP_KERNEL); 4701 if (!ipolicer->pfvf_map) 4702 return -ENOMEM; 4703 4704 ipolicer->match_id = devm_kcalloc(rvu->dev, 4705 ipolicer->band_prof.max, 4706 sizeof(u16), GFP_KERNEL); 4707 if (!ipolicer->match_id) 4708 return -ENOMEM; 4709 4710 for (prof_idx = 0; 4711 prof_idx < ipolicer->band_prof.max; prof_idx++) { 4712 /* Set AF as current owner for INIT ops to succeed */ 
4713 ipolicer->pfvf_map[prof_idx] = 0x00; 4714 4715 /* There is no enable bit in the profile context, 4716 * so no context disable. So let's INIT them here 4717 * so that PF/VF later on have to just do WRITE to 4718 * setup policer rates and config. 4719 */ 4720 err = nix_init_policer_context(rvu, nix_hw, 4721 layer, prof_idx); 4722 if (err) 4723 return err; 4724 } 4725 4726 /* Allocate memory for maintaining ref_counts for MID level 4727 * profiles, this will be needed for leaf layer profiles' 4728 * aggregation. 4729 */ 4730 if (layer != BAND_PROF_MID_LAYER) 4731 continue; 4732 4733 ipolicer->ref_count = devm_kcalloc(rvu->dev, 4734 ipolicer->band_prof.max, 4735 sizeof(u16), GFP_KERNEL); 4736 } 4737 4738 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 4739 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 4740 4741 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 4742 4743 return 0; 4744 } 4745 4746 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 4747 { 4748 struct nix_ipolicer *ipolicer; 4749 int layer; 4750 4751 if (!rvu->hw->cap.ipolicer) 4752 return; 4753 4754 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4755 ipolicer = &nix_hw->ipolicer[layer]; 4756 4757 if (!ipolicer->band_prof.max) 4758 continue; 4759 4760 kfree(ipolicer->band_prof.bmap); 4761 } 4762 } 4763 4764 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 4765 struct nix_hw *nix_hw, u16 pcifunc) 4766 { 4767 struct nix_ipolicer *ipolicer; 4768 int layer, hi_layer, prof_idx; 4769 4770 /* Bits [15:14] in profile index represent layer */ 4771 layer = (req->qidx >> 14) & 0x03; 4772 prof_idx = req->qidx & 0x3FFF; 4773 4774 ipolicer = &nix_hw->ipolicer[layer]; 4775 if (prof_idx >= ipolicer->band_prof.max) 4776 return -EINVAL; 4777 4778 /* Check if the profile is allocated to the requesting PCIFUNC or not 4779 * with the exception of AF. AF is allowed to read and update contexts. 4780 */ 4781 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 4782 return -EINVAL; 4783 4784 /* If this profile is linked to higher layer profile then check 4785 * if that profile is also allocated to the requesting PCIFUNC 4786 * or not. 4787 */ 4788 if (!req->prof.hl_en) 4789 return 0; 4790 4791 /* Leaf layer profile can link only to mid layer and 4792 * mid layer to top layer. 
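	 * Any other combination is rejected below with -EINVAL.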
4793 */ 4794 if (layer == BAND_PROF_LEAF_LAYER) 4795 hi_layer = BAND_PROF_MID_LAYER; 4796 else if (layer == BAND_PROF_MID_LAYER) 4797 hi_layer = BAND_PROF_TOP_LAYER; 4798 else 4799 return -EINVAL; 4800 4801 ipolicer = &nix_hw->ipolicer[hi_layer]; 4802 prof_idx = req->prof.band_prof_id; 4803 if (prof_idx >= ipolicer->band_prof.max || 4804 ipolicer->pfvf_map[prof_idx] != pcifunc) 4805 return -EINVAL; 4806 4807 return 0; 4808 } 4809 4810 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 4811 struct nix_bandprof_alloc_req *req, 4812 struct nix_bandprof_alloc_rsp *rsp) 4813 { 4814 int blkaddr, layer, prof, idx, err; 4815 u16 pcifunc = req->hdr.pcifunc; 4816 struct nix_ipolicer *ipolicer; 4817 struct nix_hw *nix_hw; 4818 4819 if (!rvu->hw->cap.ipolicer) 4820 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4821 4822 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4823 if (err) 4824 return err; 4825 4826 mutex_lock(&rvu->rsrc_lock); 4827 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4828 if (layer == BAND_PROF_INVAL_LAYER) 4829 continue; 4830 if (!req->prof_count[layer]) 4831 continue; 4832 4833 ipolicer = &nix_hw->ipolicer[layer]; 4834 for (idx = 0; idx < req->prof_count[layer]; idx++) { 4835 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 4836 if (idx == MAX_BANDPROF_PER_PFFUNC) 4837 break; 4838 4839 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 4840 if (prof < 0) 4841 break; 4842 rsp->prof_count[layer]++; 4843 rsp->prof_idx[layer][idx] = prof; 4844 ipolicer->pfvf_map[prof] = pcifunc; 4845 } 4846 } 4847 mutex_unlock(&rvu->rsrc_lock); 4848 return 0; 4849 } 4850 4851 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 4852 { 4853 int blkaddr, layer, prof_idx, err; 4854 struct nix_ipolicer *ipolicer; 4855 struct nix_hw *nix_hw; 4856 4857 if (!rvu->hw->cap.ipolicer) 4858 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4859 4860 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4861 if (err) 4862 return err; 4863 4864 mutex_lock(&rvu->rsrc_lock); 4865 /* Free all the profiles allocated to the PCIFUNC */ 4866 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4867 if (layer == BAND_PROF_INVAL_LAYER) 4868 continue; 4869 ipolicer = &nix_hw->ipolicer[layer]; 4870 4871 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 4872 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 4873 continue; 4874 4875 /* Clear ratelimit aggregation, if any */ 4876 if (layer == BAND_PROF_LEAF_LAYER && 4877 ipolicer->match_id[prof_idx]) 4878 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 4879 4880 ipolicer->pfvf_map[prof_idx] = 0x00; 4881 ipolicer->match_id[prof_idx] = 0; 4882 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 4883 } 4884 } 4885 mutex_unlock(&rvu->rsrc_lock); 4886 return 0; 4887 } 4888 4889 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 4890 struct nix_bandprof_free_req *req, 4891 struct msg_rsp *rsp) 4892 { 4893 int blkaddr, layer, prof_idx, idx, err; 4894 u16 pcifunc = req->hdr.pcifunc; 4895 struct nix_ipolicer *ipolicer; 4896 struct nix_hw *nix_hw; 4897 4898 if (req->free_all) 4899 return nix_free_all_bandprof(rvu, pcifunc); 4900 4901 if (!rvu->hw->cap.ipolicer) 4902 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4903 4904 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4905 if (err) 4906 return err; 4907 4908 mutex_lock(&rvu->rsrc_lock); 4909 /* Free the requested profile indices */ 4910 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4911 if (layer == BAND_PROF_INVAL_LAYER) 4912 continue; 4913 if (!req->prof_count[layer]) 4914 
continue; 4915 4916 ipolicer = &nix_hw->ipolicer[layer]; 4917 for (idx = 0; idx < req->prof_count[layer]; idx++) { 4918 prof_idx = req->prof_idx[layer][idx]; 4919 if (prof_idx >= ipolicer->band_prof.max || 4920 ipolicer->pfvf_map[prof_idx] != pcifunc) 4921 continue; 4922 4923 /* Clear ratelimit aggregation, if any */ 4924 if (layer == BAND_PROF_LEAF_LAYER && 4925 ipolicer->match_id[prof_idx]) 4926 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 4927 4928 ipolicer->pfvf_map[prof_idx] = 0x00; 4929 ipolicer->match_id[prof_idx] = 0; 4930 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 4931 if (idx == MAX_BANDPROF_PER_PFFUNC) 4932 break; 4933 } 4934 } 4935 mutex_unlock(&rvu->rsrc_lock); 4936 return 0; 4937 } 4938 4939 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 4940 struct nix_cn10k_aq_enq_req *aq_req, 4941 struct nix_cn10k_aq_enq_rsp *aq_rsp, 4942 u16 pcifunc, u8 ctype, u32 qidx) 4943 { 4944 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4945 aq_req->hdr.pcifunc = pcifunc; 4946 aq_req->ctype = ctype; 4947 aq_req->op = NIX_AQ_INSTOP_READ; 4948 aq_req->qidx = qidx; 4949 4950 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4951 (struct nix_aq_enq_req *)aq_req, 4952 (struct nix_aq_enq_rsp *)aq_rsp); 4953 } 4954 4955 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 4956 struct nix_hw *nix_hw, 4957 struct nix_cn10k_aq_enq_req *aq_req, 4958 struct nix_cn10k_aq_enq_rsp *aq_rsp, 4959 u32 leaf_prof, u16 mid_prof) 4960 { 4961 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4962 aq_req->hdr.pcifunc = 0x00; 4963 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 4964 aq_req->op = NIX_AQ_INSTOP_WRITE; 4965 aq_req->qidx = leaf_prof; 4966 4967 aq_req->prof.band_prof_id = mid_prof; 4968 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 4969 aq_req->prof.hl_en = 1; 4970 aq_req->prof_mask.hl_en = 1; 4971 4972 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4973 (struct nix_aq_enq_req *)aq_req, 4974 (struct nix_aq_enq_rsp *)aq_rsp); 4975 } 4976 4977 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 4978 u16 rq_idx, u16 match_id) 4979 { 4980 int leaf_prof, mid_prof, leaf_match; 4981 struct nix_cn10k_aq_enq_req aq_req; 4982 struct nix_cn10k_aq_enq_rsp aq_rsp; 4983 struct nix_ipolicer *ipolicer; 4984 struct nix_hw *nix_hw; 4985 int blkaddr, idx, rc; 4986 4987 if (!rvu->hw->cap.ipolicer) 4988 return 0; 4989 4990 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4991 if (rc) 4992 return rc; 4993 4994 /* Fetch the RQ's context to see if policing is enabled */ 4995 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 4996 NIX_AQ_CTYPE_RQ, rq_idx); 4997 if (rc) { 4998 dev_err(rvu->dev, 4999 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 5000 __func__, rq_idx, pcifunc); 5001 return rc; 5002 } 5003 5004 if (!aq_rsp.rq.policer_ena) 5005 return 0; 5006 5007 /* Get the bandwidth profile ID mapped to this RQ */ 5008 leaf_prof = aq_rsp.rq.band_prof_id; 5009 5010 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 5011 ipolicer->match_id[leaf_prof] = match_id; 5012 5013 /* Check if any other leaf profile is marked with same match_id */ 5014 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 5015 if (idx == leaf_prof) 5016 continue; 5017 if (ipolicer->match_id[idx] != match_id) 5018 continue; 5019 5020 leaf_match = idx; 5021 break; 5022 } 5023 5024 if (idx == ipolicer->band_prof.max) 5025 return 0; 5026 5027 /* Fetch the matching profile's context to check if it's already 5028 * mapped to a mid level profile. 
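	 * If it is, this leaf is attached to that existing mid layer profile;
	 * otherwise a new mid layer profile is allocated and both leaves are
	 * mapped to it.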
5029 */ 5030 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5031 NIX_AQ_CTYPE_BANDPROF, leaf_match); 5032 if (rc) { 5033 dev_err(rvu->dev, 5034 "%s: Failed to fetch context of leaf profile %d\n", 5035 __func__, leaf_match); 5036 return rc; 5037 } 5038 5039 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5040 if (aq_rsp.prof.hl_en) { 5041 /* Get the mid layer profile index and map the leaf_prof index 5042 * to it as well, so that flows which are steered 5043 * to different RQs but marked with the same match_id 5044 * are rate limited in an aggregate fashion. 5045 */ 5046 mid_prof = aq_rsp.prof.band_prof_id; 5047 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5048 &aq_req, &aq_rsp, 5049 leaf_prof, mid_prof); 5050 if (rc) { 5051 dev_err(rvu->dev, 5052 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5053 __func__, leaf_prof, mid_prof); 5054 goto exit; 5055 } 5056 5057 mutex_lock(&rvu->rsrc_lock); 5058 ipolicer->ref_count[mid_prof]++; 5059 mutex_unlock(&rvu->rsrc_lock); 5060 goto exit; 5061 } 5062 5063 /* Allocate a mid layer profile and 5064 * map both 'leaf_prof' and 'leaf_match' profiles to it. 5065 */ 5066 mutex_lock(&rvu->rsrc_lock); 5067 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5068 if (mid_prof < 0) { 5069 dev_err(rvu->dev, 5070 "%s: Unable to allocate mid layer profile\n", __func__); 5071 mutex_unlock(&rvu->rsrc_lock); 5072 goto exit; 5073 } 5074 mutex_unlock(&rvu->rsrc_lock); 5075 ipolicer->pfvf_map[mid_prof] = 0x00; 5076 ipolicer->ref_count[mid_prof] = 0; 5077 5078 /* Initialize the mid layer profile the same as 'leaf_prof' */ 5079 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5080 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5081 if (rc) { 5082 dev_err(rvu->dev, 5083 "%s: Failed to fetch context of leaf profile %d\n", 5084 __func__, leaf_prof); 5085 goto exit; 5086 } 5087 5088 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5089 aq_req.hdr.pcifunc = 0x00; 5090 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 5091 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5092 aq_req.op = NIX_AQ_INSTOP_WRITE; 5093 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 5094 /* Clear the higher layer enable bit in the mid profile, just in case */ 5095 aq_req.prof.hl_en = 0; 5096 aq_req.prof_mask.hl_en = 1; 5097 5098 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5099 (struct nix_aq_enq_req *)&aq_req, NULL); 5100 if (rc) { 5101 dev_err(rvu->dev, 5102 "%s: Failed to INIT context of mid layer profile %d\n", 5103 __func__, mid_prof); 5104 goto exit; 5105 } 5106 5107 /* Map both leaf profiles to this mid layer profile */ 5108 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5109 &aq_req, &aq_rsp, 5110 leaf_prof, mid_prof); 5111 if (rc) { 5112 dev_err(rvu->dev, 5113 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5114 __func__, leaf_prof, mid_prof); 5115 goto exit; 5116 } 5117 5118 mutex_lock(&rvu->rsrc_lock); 5119 ipolicer->ref_count[mid_prof]++; 5120 mutex_unlock(&rvu->rsrc_lock); 5121 5122 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5123 &aq_req, &aq_rsp, 5124 leaf_match, mid_prof); 5125 if (rc) { 5126 dev_err(rvu->dev, 5127 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5128 __func__, leaf_match, mid_prof); 5129 ipolicer->ref_count[mid_prof]--; 5130 goto exit; 5131 } 5132 5133 mutex_lock(&rvu->rsrc_lock); 5134 ipolicer->ref_count[mid_prof]++; 5135 mutex_unlock(&rvu->rsrc_lock); 5136 5137 exit: 5138 return rc; 5139 } 5140 5141 /* Called with the rsrc_lock mutex held */ 5142 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 5143 u32
leaf_prof) 5144 { 5145 struct nix_cn10k_aq_enq_req aq_req; 5146 struct nix_cn10k_aq_enq_rsp aq_rsp; 5147 struct nix_ipolicer *ipolicer; 5148 u16 mid_prof; 5149 int rc; 5150 5151 mutex_unlock(&rvu->rsrc_lock); 5152 5153 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5154 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5155 5156 mutex_lock(&rvu->rsrc_lock); 5157 if (rc) { 5158 dev_err(rvu->dev, 5159 "%s: Failed to fetch context of leaf profile %d\n", 5160 __func__, leaf_prof); 5161 return; 5162 } 5163 5164 if (!aq_rsp.prof.hl_en) 5165 return; 5166 5167 mid_prof = aq_rsp.prof.band_prof_id; 5168 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5169 ipolicer->ref_count[mid_prof]--; 5170 /* If ref_count is zero, free mid layer profile */ 5171 if (!ipolicer->ref_count[mid_prof]) { 5172 ipolicer->pfvf_map[mid_prof] = 0x00; 5173 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 5174 } 5175 } 5176 5177 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 5178 struct nix_bandprof_get_hwinfo_rsp *rsp) 5179 { 5180 struct nix_ipolicer *ipolicer; 5181 int blkaddr, layer, err; 5182 struct nix_hw *nix_hw; 5183 u64 tu; 5184 5185 if (!rvu->hw->cap.ipolicer) 5186 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5187 5188 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 5189 if (err) 5190 return err; 5191 5192 /* Return number of bandwidth profiles free at each layer */ 5193 mutex_lock(&rvu->rsrc_lock); 5194 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5195 if (layer == BAND_PROF_INVAL_LAYER) 5196 continue; 5197 5198 ipolicer = &nix_hw->ipolicer[layer]; 5199 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 5200 } 5201 mutex_unlock(&rvu->rsrc_lock); 5202 5203 /* Set the policer timeunit in nanosec */ 5204 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 5205 rsp->policer_timeunit = (tu + 1) * 100; 5206 5207 return 0; 5208 } 5209