1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 11 #include "rvu_struct.h" 12 #include "rvu_reg.h" 13 #include "rvu.h" 14 #include "npc.h" 15 #include "mcs.h" 16 #include "cgx.h" 17 #include "lmac_common.h" 18 #include "rvu_npc_hash.h" 19 20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 22 int type, int chan_id); 23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 24 int type, bool add); 25 static int nix_setup_ipolicers(struct rvu *rvu, 26 struct nix_hw *nix_hw, int blkaddr); 27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); 28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 29 struct nix_hw *nix_hw, u16 pcifunc); 30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 32 u32 leaf_prof); 33 static const char *nix_get_ctx_name(int ctype); 34 35 enum mc_tbl_sz { 36 MC_TBL_SZ_256, 37 MC_TBL_SZ_512, 38 MC_TBL_SZ_1K, 39 MC_TBL_SZ_2K, 40 MC_TBL_SZ_4K, 41 MC_TBL_SZ_8K, 42 MC_TBL_SZ_16K, 43 MC_TBL_SZ_32K, 44 MC_TBL_SZ_64K, 45 }; 46 47 enum mc_buf_cnt { 48 MC_BUF_CNT_8, 49 MC_BUF_CNT_16, 50 MC_BUF_CNT_32, 51 MC_BUF_CNT_64, 52 MC_BUF_CNT_128, 53 MC_BUF_CNT_256, 54 MC_BUF_CNT_512, 55 MC_BUF_CNT_1024, 56 MC_BUF_CNT_2048, 57 }; 58 59 enum nix_makr_fmt_indexes { 60 NIX_MARK_CFG_IP_DSCP_RED, 61 NIX_MARK_CFG_IP_DSCP_YELLOW, 62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 63 NIX_MARK_CFG_IP_ECN_RED, 64 NIX_MARK_CFG_IP_ECN_YELLOW, 65 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 66 NIX_MARK_CFG_VLAN_DEI_RED, 67 NIX_MARK_CFG_VLAN_DEI_YELLOW, 68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 69 NIX_MARK_CFG_MAX, 70 }; 71 72 /* For now considering MC resources needed for broadcast 73 * pkt replication only. i.e 256 HWVFs + 12 PFs. 
74 */ 75 #define MC_TBL_SIZE MC_TBL_SZ_512 76 #define MC_BUF_CNT MC_BUF_CNT_128 77 78 struct mce { 79 struct hlist_node node; 80 u16 pcifunc; 81 }; 82 83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 84 { 85 int i = 0; 86 87 /*If blkaddr is 0, return the first nix block address*/ 88 if (blkaddr == 0) 89 return rvu->nix_blkaddr[blkaddr]; 90 91 while (i + 1 < MAX_NIX_BLKS) { 92 if (rvu->nix_blkaddr[i] == blkaddr) 93 return rvu->nix_blkaddr[i + 1]; 94 i++; 95 } 96 97 return 0; 98 } 99 100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 101 { 102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 103 int blkaddr; 104 105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 106 if (!pfvf->nixlf || blkaddr < 0) 107 return false; 108 return true; 109 } 110 111 int rvu_get_nixlf_count(struct rvu *rvu) 112 { 113 int blkaddr = 0, max = 0; 114 struct rvu_block *block; 115 116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 117 while (blkaddr) { 118 block = &rvu->hw->block[blkaddr]; 119 max += block->lf.max; 120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 121 } 122 return max; 123 } 124 125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 126 { 127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 128 struct rvu_hwinfo *hw = rvu->hw; 129 int blkaddr; 130 131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 132 if (!pfvf->nixlf || blkaddr < 0) 133 return NIX_AF_ERR_AF_LF_INVALID; 134 135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 136 if (*nixlf < 0) 137 return NIX_AF_ERR_AF_LF_INVALID; 138 139 if (nix_blkaddr) 140 *nix_blkaddr = blkaddr; 141 142 return 0; 143 } 144 145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 146 struct nix_hw **nix_hw, int *blkaddr) 147 { 148 struct rvu_pfvf *pfvf; 149 150 pfvf = rvu_get_pfvf(rvu, pcifunc); 151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 152 if (!pfvf->nixlf || *blkaddr < 0) 153 return NIX_AF_ERR_AF_LF_INVALID; 154 155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 156 if (!*nix_hw) 157 return NIX_AF_ERR_INVALID_NIXBLK; 158 return 0; 159 } 160 161 static void nix_mce_list_init(struct nix_mce_list *list, int max) 162 { 163 INIT_HLIST_HEAD(&list->head); 164 list->count = 0; 165 list->max = max; 166 } 167 168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 169 { 170 int idx; 171 172 if (!mcast) 173 return 0; 174 175 idx = mcast->next_free_mce; 176 mcast->next_free_mce += count; 177 return idx; 178 } 179 180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 181 { 182 int nix_blkaddr = 0, i = 0; 183 struct rvu *rvu = hw->rvu; 184 185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 186 while (nix_blkaddr) { 187 if (blkaddr == nix_blkaddr && hw->nix) 188 return &hw->nix[i]; 189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 190 i++; 191 } 192 return NULL; 193 } 194 195 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) 196 { 197 if (hw->cap.nix_multiple_dwrr_mtu) 198 return NIX_AF_DWRR_MTUX(smq_link_type); 199 200 if (smq_link_type == SMQ_LINK_TYPE_SDP) 201 return NIX_AF_DWRR_SDP_MTU; 202 203 /* Here it's same reg for RPM and LBK */ 204 return NIX_AF_DWRR_RPM_MTU; 205 } 206 207 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) 208 { 209 dwrr_mtu &= 0x1FULL; 210 211 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 212 * Value of 4 is reserved for MTU value of 9728 bytes. 213 * Value of 5 is reserved for MTU value of 10240 bytes. 
214 */ 215 switch (dwrr_mtu) { 216 case 4: 217 return 9728; 218 case 5: 219 return 10240; 220 default: 221 return BIT_ULL(dwrr_mtu); 222 } 223 224 return 0; 225 } 226 227 u32 convert_bytes_to_dwrr_mtu(u32 bytes) 228 { 229 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 230 * Value of 4 is reserved for MTU value of 9728 bytes. 231 * Value of 5 is reserved for MTU value of 10240 bytes. 232 */ 233 if (bytes > BIT_ULL(16)) 234 return 0; 235 236 switch (bytes) { 237 case 9728: 238 return 4; 239 case 10240: 240 return 5; 241 default: 242 return ilog2(bytes); 243 } 244 245 return 0; 246 } 247 248 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 249 { 250 int err; 251 252 /* Sync all in flight RX packets to LLC/DRAM */ 253 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 254 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 255 if (err) 256 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); 257 258 /* SW_SYNC ensures all existing transactions are finished and pkts 259 * are written to LLC/DRAM, queues should be teared down after 260 * successful SW_SYNC. Due to a HW errata, in some rare scenarios 261 * an existing transaction might end after SW_SYNC operation. To 262 * ensure operation is fully done, do the SW_SYNC twice. 263 */ 264 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 265 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 266 if (err) 267 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); 268 } 269 270 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 271 int lvl, u16 pcifunc, u16 schq) 272 { 273 struct rvu_hwinfo *hw = rvu->hw; 274 struct nix_txsch *txsch; 275 struct nix_hw *nix_hw; 276 u16 map_func; 277 278 nix_hw = get_nix_hw(rvu->hw, blkaddr); 279 if (!nix_hw) 280 return false; 281 282 txsch = &nix_hw->txsch[lvl]; 283 /* Check out of bounds */ 284 if (schq >= txsch->schq.max) 285 return false; 286 287 mutex_lock(&rvu->rsrc_lock); 288 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 289 mutex_unlock(&rvu->rsrc_lock); 290 291 /* TLs aggegating traffic are shared across PF and VFs */ 292 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 293 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 294 return false; 295 else 296 return true; 297 } 298 299 if (map_func != pcifunc) 300 return false; 301 302 return true; 303 } 304 305 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, 306 struct nix_lf_alloc_rsp *rsp, bool loop) 307 { 308 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); 309 u16 req_chan_base, req_chan_end, req_chan_cnt; 310 struct rvu_hwinfo *hw = rvu->hw; 311 struct sdp_node_info *sdp_info; 312 int pkind, pf, vf, lbkid, vfid; 313 u8 cgx_id, lmac_id; 314 bool from_vf; 315 int err; 316 317 pf = rvu_get_pf(pcifunc); 318 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 319 type != NIX_INTF_TYPE_SDP) 320 return 0; 321 322 switch (type) { 323 case NIX_INTF_TYPE_CGX: 324 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 325 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 326 327 pkind = rvu_npc_get_pkind(rvu, pf); 328 if (pkind < 0) { 329 dev_err(rvu->dev, 330 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 331 return -EINVAL; 332 } 333 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 334 pfvf->tx_chan_base = pfvf->rx_chan_base; 335 pfvf->rx_chan_cnt = 1; 336 pfvf->tx_chan_cnt = 1; 337 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; 338 339 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 340 
rvu_npc_set_pkind(rvu, pkind, pfvf); 341 342 break; 343 case NIX_INTF_TYPE_LBK: 344 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 345 346 /* If NIX1 block is present on the silicon then NIXes are 347 * assigned alternatively for lbk interfaces. NIX0 should 348 * send packets on lbk link 1 channels and NIX1 should send 349 * on lbk link 0 channels for the communication between 350 * NIX0 and NIX1. 351 */ 352 lbkid = 0; 353 if (rvu->hw->lbk_links > 1) 354 lbkid = vf & 0x1 ? 0 : 1; 355 356 /* By default NIX0 is configured to send packet on lbk link 1 357 * (which corresponds to LBK1), same packet will receive on 358 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 359 * (which corresponds to LBK2) packet will receive on NIX0 lbk 360 * link 1. 361 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 362 * transmits and receives on lbk link 0, whick corresponds 363 * to LBK1 block, back to back connectivity between NIX and 364 * LBK can be achieved (which is similar to 96xx) 365 * 366 * RX TX 367 * NIX0 lbk link 1 (LBK2) 1 (LBK1) 368 * NIX0 lbk link 0 (LBK0) 0 (LBK0) 369 * NIX1 lbk link 0 (LBK1) 0 (LBK2) 370 * NIX1 lbk link 1 (LBK3) 1 (LBK3) 371 */ 372 if (loop) 373 lbkid = !lbkid; 374 375 /* Note that AF's VFs work in pairs and talk over consecutive 376 * loopback channels.Therefore if odd number of AF VFs are 377 * enabled then the last VF remains with no pair. 378 */ 379 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 380 pfvf->tx_chan_base = vf & 0x1 ? 381 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 382 rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 383 pfvf->rx_chan_cnt = 1; 384 pfvf->tx_chan_cnt = 1; 385 rsp->tx_link = hw->cgx_links + lbkid; 386 pfvf->lbkid = lbkid; 387 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); 388 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 389 pfvf->rx_chan_base, 390 pfvf->rx_chan_cnt); 391 392 break; 393 case NIX_INTF_TYPE_SDP: 394 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 395 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 396 sdp_info = parent_pf->sdp_info; 397 if (!sdp_info) { 398 dev_err(rvu->dev, "Invalid sdp_info pointer\n"); 399 return -EINVAL; 400 } 401 if (from_vf) { 402 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + 403 sdp_info->num_pf_rings; 404 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 405 for (vfid = 0; vfid < vf; vfid++) 406 req_chan_base += sdp_info->vf_rings[vfid]; 407 req_chan_cnt = sdp_info->vf_rings[vf]; 408 req_chan_end = req_chan_base + req_chan_cnt - 1; 409 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || 410 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { 411 dev_err(rvu->dev, 412 "PF_Func 0x%x: Invalid channel base and count\n", 413 pcifunc); 414 return -EINVAL; 415 } 416 } else { 417 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; 418 req_chan_cnt = sdp_info->num_pf_rings; 419 } 420 421 pfvf->rx_chan_base = req_chan_base; 422 pfvf->rx_chan_cnt = req_chan_cnt; 423 pfvf->tx_chan_base = pfvf->rx_chan_base; 424 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; 425 426 rsp->tx_link = hw->cgx_links + hw->lbk_links; 427 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 428 pfvf->rx_chan_base, 429 pfvf->rx_chan_cnt); 430 break; 431 } 432 433 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 434 * RVU PF/VF's MAC address. 
435 */ 436 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 437 pfvf->rx_chan_base, pfvf->mac_addr); 438 439 /* Add this PF_FUNC to bcast pkt replication list */ 440 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 441 if (err) { 442 dev_err(rvu->dev, 443 "Bcast list, failed to enable PF_FUNC 0x%x\n", 444 pcifunc); 445 return err; 446 } 447 /* Install MCAM rule matching Ethernet broadcast mac address */ 448 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 449 nixlf, pfvf->rx_chan_base); 450 451 pfvf->maxlen = NIC_HW_MIN_FRS; 452 pfvf->minlen = NIC_HW_MIN_FRS; 453 454 return 0; 455 } 456 457 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 458 { 459 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 460 int err; 461 462 pfvf->maxlen = 0; 463 pfvf->minlen = 0; 464 465 /* Remove this PF_FUNC from bcast pkt replication list */ 466 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 467 if (err) { 468 dev_err(rvu->dev, 469 "Bcast list, failed to disable PF_FUNC 0x%x\n", 470 pcifunc); 471 } 472 473 /* Free and disable any MCAM entries used by this NIX LF */ 474 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 475 476 /* Disable DMAC filters used */ 477 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 478 } 479 480 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 481 struct nix_bp_cfg_req *req, 482 struct msg_rsp *rsp) 483 { 484 u16 pcifunc = req->hdr.pcifunc; 485 struct rvu_pfvf *pfvf; 486 int blkaddr, pf, type; 487 u16 chan_base, chan; 488 u64 cfg; 489 490 pf = rvu_get_pf(pcifunc); 491 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 492 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 493 return 0; 494 495 pfvf = rvu_get_pfvf(rvu, pcifunc); 496 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 497 498 chan_base = pfvf->rx_chan_base + req->chan_base; 499 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 500 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 501 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 502 cfg & ~BIT_ULL(16)); 503 } 504 return 0; 505 } 506 507 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 508 int type, int chan_id) 509 { 510 int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; 511 u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; 512 struct rvu_hwinfo *hw = rvu->hw; 513 struct rvu_pfvf *pfvf; 514 u8 cgx_id, lmac_id; 515 u64 cfg; 516 517 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 518 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 519 lmac_chan_cnt = cfg & 0xFF; 520 521 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 522 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 523 524 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 525 sdp_chan_cnt = cfg & 0xFFF; 526 sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; 527 528 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 529 530 /* Backpressure IDs range division 531 * CGX channles are mapped to (0 - 191) BPIDs 532 * LBK channles are mapped to (192 - 255) BPIDs 533 * SDP channles are mapped to (256 - 511) BPIDs 534 * 535 * Lmac channles and bpids mapped as follows 536 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 537 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 538 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 
539 */ 540 switch (type) { 541 case NIX_INTF_TYPE_CGX: 542 if ((req->chan_base + req->chan_cnt) > 16) 543 return -EINVAL; 544 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 545 /* Assign bpid based on cgx, lmac and chan id */ 546 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 547 (lmac_id * lmac_chan_cnt) + req->chan_base; 548 549 if (req->bpid_per_chan) 550 bpid += chan_id; 551 if (bpid > cgx_bpid_cnt) 552 return -EINVAL; 553 break; 554 555 case NIX_INTF_TYPE_LBK: 556 if ((req->chan_base + req->chan_cnt) > 63) 557 return -EINVAL; 558 bpid = cgx_bpid_cnt + req->chan_base; 559 if (req->bpid_per_chan) 560 bpid += chan_id; 561 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 562 return -EINVAL; 563 break; 564 case NIX_INTF_TYPE_SDP: 565 if ((req->chan_base + req->chan_cnt) > 255) 566 return -EINVAL; 567 568 bpid = sdp_bpid_cnt + req->chan_base; 569 if (req->bpid_per_chan) 570 bpid += chan_id; 571 572 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) 573 return -EINVAL; 574 break; 575 default: 576 return -EINVAL; 577 } 578 return bpid; 579 } 580 581 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 582 struct nix_bp_cfg_req *req, 583 struct nix_bp_cfg_rsp *rsp) 584 { 585 int blkaddr, pf, type, chan_id = 0; 586 u16 pcifunc = req->hdr.pcifunc; 587 struct rvu_pfvf *pfvf; 588 u16 chan_base, chan; 589 s16 bpid, bpid_base; 590 u64 cfg; 591 592 pf = rvu_get_pf(pcifunc); 593 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 594 if (is_sdp_pfvf(pcifunc)) 595 type = NIX_INTF_TYPE_SDP; 596 597 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ 598 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 599 type != NIX_INTF_TYPE_SDP) 600 return 0; 601 602 pfvf = rvu_get_pfvf(rvu, pcifunc); 603 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 604 605 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 606 chan_base = pfvf->rx_chan_base + req->chan_base; 607 bpid = bpid_base; 608 609 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 610 if (bpid < 0) { 611 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 612 return -EINVAL; 613 } 614 615 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 616 cfg &= ~GENMASK_ULL(8, 0); 617 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 618 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); 619 chan_id++; 620 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 621 } 622 623 for (chan = 0; chan < req->chan_cnt; chan++) { 624 /* Map channel and bpid assign to it */ 625 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 626 (bpid_base & 0x3FF); 627 if (req->bpid_per_chan) 628 bpid_base++; 629 } 630 rsp->chan_cnt = req->chan_cnt; 631 632 return 0; 633 } 634 635 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 636 u64 format, bool v4, u64 *fidx) 637 { 638 struct nix_lso_format field = {0}; 639 640 /* IP's Length field */ 641 field.layer = NIX_TXLAYER_OL3; 642 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 643 field.offset = v4 ? 
2 : 4; 644 field.sizem1 = 1; /* i.e 2 bytes */ 645 field.alg = NIX_LSOALG_ADD_PAYLEN; 646 rvu_write64(rvu, blkaddr, 647 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 648 *(u64 *)&field); 649 650 /* No ID field in IPv6 header */ 651 if (!v4) 652 return; 653 654 /* IP's ID field */ 655 field.layer = NIX_TXLAYER_OL3; 656 field.offset = 4; 657 field.sizem1 = 1; /* i.e 2 bytes */ 658 field.alg = NIX_LSOALG_ADD_SEGNUM; 659 rvu_write64(rvu, blkaddr, 660 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 661 *(u64 *)&field); 662 } 663 664 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 665 u64 format, u64 *fidx) 666 { 667 struct nix_lso_format field = {0}; 668 669 /* TCP's sequence number field */ 670 field.layer = NIX_TXLAYER_OL4; 671 field.offset = 4; 672 field.sizem1 = 3; /* i.e 4 bytes */ 673 field.alg = NIX_LSOALG_ADD_OFFSET; 674 rvu_write64(rvu, blkaddr, 675 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 676 *(u64 *)&field); 677 678 /* TCP's flags field */ 679 field.layer = NIX_TXLAYER_OL4; 680 field.offset = 12; 681 field.sizem1 = 1; /* 2 bytes */ 682 field.alg = NIX_LSOALG_TCP_FLAGS; 683 rvu_write64(rvu, blkaddr, 684 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 685 *(u64 *)&field); 686 } 687 688 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 689 { 690 u64 cfg, idx, fidx = 0; 691 692 /* Get max HW supported format indices */ 693 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 694 nix_hw->lso.total = cfg; 695 696 /* Enable LSO */ 697 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 698 /* For TSO, set first and middle segment flags to 699 * mask out PSH, RST & FIN flags in TCP packet 700 */ 701 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 702 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 703 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 704 705 /* Setup default static LSO formats 706 * 707 * Configure format fields for TCPv4 segmentation offload 708 */ 709 idx = NIX_LSO_FORMAT_IDX_TSOV4; 710 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 711 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 712 713 /* Set rest of the fields to NOP */ 714 for (; fidx < 8; fidx++) { 715 rvu_write64(rvu, blkaddr, 716 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 717 } 718 nix_hw->lso.in_use++; 719 720 /* Configure format fields for TCPv6 segmentation offload */ 721 idx = NIX_LSO_FORMAT_IDX_TSOV6; 722 fidx = 0; 723 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 724 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 725 726 /* Set rest of the fields to NOP */ 727 for (; fidx < 8; fidx++) { 728 rvu_write64(rvu, blkaddr, 729 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 730 } 731 nix_hw->lso.in_use++; 732 } 733 734 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 735 { 736 kfree(pfvf->rq_bmap); 737 kfree(pfvf->sq_bmap); 738 kfree(pfvf->cq_bmap); 739 if (pfvf->rq_ctx) 740 qmem_free(rvu->dev, pfvf->rq_ctx); 741 if (pfvf->sq_ctx) 742 qmem_free(rvu->dev, pfvf->sq_ctx); 743 if (pfvf->cq_ctx) 744 qmem_free(rvu->dev, pfvf->cq_ctx); 745 if (pfvf->rss_ctx) 746 qmem_free(rvu->dev, pfvf->rss_ctx); 747 if (pfvf->nix_qints_ctx) 748 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 749 if (pfvf->cq_ints_ctx) 750 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 751 752 pfvf->rq_bmap = NULL; 753 pfvf->cq_bmap = NULL; 754 pfvf->sq_bmap = NULL; 755 pfvf->rq_ctx = NULL; 756 pfvf->sq_ctx = NULL; 757 pfvf->cq_ctx = NULL; 758 pfvf->rss_ctx = NULL; 759 pfvf->nix_qints_ctx = NULL; 760 pfvf->cq_ints_ctx = NULL; 761 } 762 763 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 764 struct rvu_pfvf *pfvf, int nixlf, 765 int rss_sz, int rss_grps, int hwctx_size, 766 u64 way_mask, bool tag_lsb_as_adder) 767 { 768 int err, grp, num_indices; 769 u64 val; 770 771 /* RSS is not requested for this NIXLF */ 772 if (!rss_sz) 773 return 0; 774 num_indices = rss_sz * rss_grps; 775 776 /* Alloc NIX RSS HW context memory and config the base */ 777 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 778 if (err) 779 return err; 780 781 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 782 (u64)pfvf->rss_ctx->iova); 783 784 /* Config full RSS table size, enable RSS and caching */ 785 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | 786 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); 787 788 if (tag_lsb_as_adder) 789 val |= BIT_ULL(5); 790 791 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); 792 /* Config RSS group offset and sizes */ 793 for (grp = 0; grp < rss_grps; grp++) 794 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 795 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 796 return 0; 797 } 798 799 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 800 struct nix_aq_inst_s *inst) 801 { 802 struct admin_queue *aq = block->aq; 803 struct nix_aq_res_s *result; 804 int timeout = 1000; 805 u64 reg, head; 806 int ret; 807 808 result = (struct nix_aq_res_s *)aq->res->base; 809 810 /* Get current head pointer where to append this instruction */ 811 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 812 head = (reg >> 4) & AQ_PTR_MASK; 813 814 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 815 (void *)inst, aq->inst->entry_sz); 816 memset(result, 0, sizeof(*result)); 817 /* sync into memory */ 818 wmb(); 819 820 /* Ring the doorbell and wait for result */ 821 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 822 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 823 cpu_relax(); 824 udelay(1); 825 timeout--; 826 if (!timeout) 827 return -EBUSY; 828 } 829 830 if (result->compcode != NIX_AQ_COMP_GOOD) { 831 /* TODO: Replace this with some error code */ 832 if (result->compcode == NIX_AQ_COMP_CTX_FAULT || 833 result->compcode == NIX_AQ_COMP_LOCKERR || 834 result->compcode == NIX_AQ_COMP_CTX_POISON) { 835 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); 836 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); 837 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); 838 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); 839 if (ret) 840 dev_err(rvu->dev, 841 "%s: Not able to unlock cachelines\n", __func__); 842 } 843 844 return -EBUSY; 845 } 846 847 return 0; 848 } 849 850 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, 851 u16 *smq, u16 *smq_mask) 852 { 853 struct nix_cn10k_aq_enq_req *aq_req; 854 855 if (!is_rvu_otx2(rvu)) { 856 aq_req = (struct nix_cn10k_aq_enq_req *)req; 857 *smq = aq_req->sq.smq; 858 *smq_mask = aq_req->sq_mask.smq; 859 } else { 860 *smq = req->sq.smq; 861 *smq_mask = req->sq_mask.smq; 862 } 863 } 864 865 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 866 struct nix_aq_enq_req *req, 867 struct nix_aq_enq_rsp *rsp) 868 { 869 struct rvu_hwinfo *hw = rvu->hw; 870 u16 pcifunc = req->hdr.pcifunc; 871 int nixlf, blkaddr, rc = 0; 872 struct nix_aq_inst_s inst; 873 struct rvu_block *block; 874 struct admin_queue *aq; 875 struct rvu_pfvf *pfvf; 876 u16 smq, smq_mask; 877 void *ctx, *mask; 878 bool ena; 879 u64 cfg; 880 881 blkaddr = nix_hw->blkaddr; 
882 block = &hw->block[blkaddr]; 883 aq = block->aq; 884 if (!aq) { 885 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 886 return NIX_AF_ERR_AQ_ENQUEUE; 887 } 888 889 pfvf = rvu_get_pfvf(rvu, pcifunc); 890 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 891 892 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 893 * operations done by AF itself. 894 */ 895 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 896 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 897 if (!pfvf->nixlf || nixlf < 0) 898 return NIX_AF_ERR_AF_LF_INVALID; 899 } 900 901 switch (req->ctype) { 902 case NIX_AQ_CTYPE_RQ: 903 /* Check if index exceeds max no of queues */ 904 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 905 rc = NIX_AF_ERR_AQ_ENQUEUE; 906 break; 907 case NIX_AQ_CTYPE_SQ: 908 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 909 rc = NIX_AF_ERR_AQ_ENQUEUE; 910 break; 911 case NIX_AQ_CTYPE_CQ: 912 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 913 rc = NIX_AF_ERR_AQ_ENQUEUE; 914 break; 915 case NIX_AQ_CTYPE_RSS: 916 /* Check if RSS is enabled and qidx is within range */ 917 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 918 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 919 (req->qidx >= (256UL << (cfg & 0xF)))) 920 rc = NIX_AF_ERR_AQ_ENQUEUE; 921 break; 922 case NIX_AQ_CTYPE_MCE: 923 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 924 925 /* Check if index exceeds MCE list length */ 926 if (!nix_hw->mcast.mce_ctx || 927 (req->qidx >= (256UL << (cfg & 0xF)))) 928 rc = NIX_AF_ERR_AQ_ENQUEUE; 929 930 /* Adding multicast lists for requests from PF/VFs is not 931 * yet supported, so ignore this. 932 */ 933 if (rsp) 934 rc = NIX_AF_ERR_AQ_ENQUEUE; 935 break; 936 case NIX_AQ_CTYPE_BANDPROF: 937 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 938 nix_hw, pcifunc)) 939 rc = NIX_AF_ERR_INVALID_BANDPROF; 940 break; 941 default: 942 rc = NIX_AF_ERR_AQ_ENQUEUE; 943 } 944 945 if (rc) 946 return rc; 947 948 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); 949 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 950 if (req->ctype == NIX_AQ_CTYPE_SQ && 951 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 952 (req->op == NIX_AQ_INSTOP_WRITE && 953 req->sq_mask.ena && req->sq.ena && smq_mask))) { 954 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 955 pcifunc, smq)) 956 return NIX_AF_ERR_AQ_ENQUEUE; 957 } 958 959 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 960 inst.lf = nixlf; 961 inst.cindex = req->qidx; 962 inst.ctype = req->ctype; 963 inst.op = req->op; 964 /* Currently we are not supporting enqueuing multiple instructions, 965 * so always choose first entry in result memory. 966 */ 967 inst.res_addr = (u64)aq->res->iova; 968 969 /* Hardware uses same aq->res->base for updating result of 970 * previous instruction hence wait here till it is done. 
971 */ 972 spin_lock(&aq->lock); 973 974 /* Clean result + context memory */ 975 memset(aq->res->base, 0, aq->res->entry_sz); 976 /* Context needs to be written at RES_ADDR + 128 */ 977 ctx = aq->res->base + 128; 978 /* Mask needs to be written at RES_ADDR + 256 */ 979 mask = aq->res->base + 256; 980 981 switch (req->op) { 982 case NIX_AQ_INSTOP_WRITE: 983 if (req->ctype == NIX_AQ_CTYPE_RQ) 984 memcpy(mask, &req->rq_mask, 985 sizeof(struct nix_rq_ctx_s)); 986 else if (req->ctype == NIX_AQ_CTYPE_SQ) 987 memcpy(mask, &req->sq_mask, 988 sizeof(struct nix_sq_ctx_s)); 989 else if (req->ctype == NIX_AQ_CTYPE_CQ) 990 memcpy(mask, &req->cq_mask, 991 sizeof(struct nix_cq_ctx_s)); 992 else if (req->ctype == NIX_AQ_CTYPE_RSS) 993 memcpy(mask, &req->rss_mask, 994 sizeof(struct nix_rsse_s)); 995 else if (req->ctype == NIX_AQ_CTYPE_MCE) 996 memcpy(mask, &req->mce_mask, 997 sizeof(struct nix_rx_mce_s)); 998 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 999 memcpy(mask, &req->prof_mask, 1000 sizeof(struct nix_bandprof_s)); 1001 fallthrough; 1002 case NIX_AQ_INSTOP_INIT: 1003 if (req->ctype == NIX_AQ_CTYPE_RQ) 1004 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 1005 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1006 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 1007 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1008 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 1009 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1010 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 1011 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1012 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 1013 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1014 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 1015 break; 1016 case NIX_AQ_INSTOP_NOP: 1017 case NIX_AQ_INSTOP_READ: 1018 case NIX_AQ_INSTOP_LOCK: 1019 case NIX_AQ_INSTOP_UNLOCK: 1020 break; 1021 default: 1022 rc = NIX_AF_ERR_AQ_ENQUEUE; 1023 spin_unlock(&aq->lock); 1024 return rc; 1025 } 1026 1027 /* Submit the instruction to AQ */ 1028 rc = nix_aq_enqueue_wait(rvu, block, &inst); 1029 if (rc) { 1030 spin_unlock(&aq->lock); 1031 return rc; 1032 } 1033 1034 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 1035 if (req->op == NIX_AQ_INSTOP_INIT) { 1036 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 1037 __set_bit(req->qidx, pfvf->rq_bmap); 1038 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 1039 __set_bit(req->qidx, pfvf->sq_bmap); 1040 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 1041 __set_bit(req->qidx, pfvf->cq_bmap); 1042 } 1043 1044 if (req->op == NIX_AQ_INSTOP_WRITE) { 1045 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1046 ena = (req->rq.ena & req->rq_mask.ena) | 1047 (test_bit(req->qidx, pfvf->rq_bmap) & 1048 ~req->rq_mask.ena); 1049 if (ena) 1050 __set_bit(req->qidx, pfvf->rq_bmap); 1051 else 1052 __clear_bit(req->qidx, pfvf->rq_bmap); 1053 } 1054 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1055 ena = (req->rq.ena & req->sq_mask.ena) | 1056 (test_bit(req->qidx, pfvf->sq_bmap) & 1057 ~req->sq_mask.ena); 1058 if (ena) 1059 __set_bit(req->qidx, pfvf->sq_bmap); 1060 else 1061 __clear_bit(req->qidx, pfvf->sq_bmap); 1062 } 1063 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1064 ena = (req->rq.ena & req->cq_mask.ena) | 1065 (test_bit(req->qidx, pfvf->cq_bmap) & 1066 ~req->cq_mask.ena); 1067 if (ena) 1068 __set_bit(req->qidx, pfvf->cq_bmap); 1069 else 1070 __clear_bit(req->qidx, pfvf->cq_bmap); 1071 } 1072 } 1073 1074 if (rsp) { 1075 /* Copy read context into mailbox */ 1076 if (req->op == NIX_AQ_INSTOP_READ) { 1077 if (req->ctype == NIX_AQ_CTYPE_RQ) 1078 memcpy(&rsp->rq, 
ctx, 1079 sizeof(struct nix_rq_ctx_s)); 1080 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1081 memcpy(&rsp->sq, ctx, 1082 sizeof(struct nix_sq_ctx_s)); 1083 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1084 memcpy(&rsp->cq, ctx, 1085 sizeof(struct nix_cq_ctx_s)); 1086 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1087 memcpy(&rsp->rss, ctx, 1088 sizeof(struct nix_rsse_s)); 1089 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1090 memcpy(&rsp->mce, ctx, 1091 sizeof(struct nix_rx_mce_s)); 1092 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1093 memcpy(&rsp->prof, ctx, 1094 sizeof(struct nix_bandprof_s)); 1095 } 1096 } 1097 1098 spin_unlock(&aq->lock); 1099 return 0; 1100 } 1101 1102 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, 1103 struct nix_aq_enq_req *req, u8 ctype) 1104 { 1105 struct nix_cn10k_aq_enq_req aq_req; 1106 struct nix_cn10k_aq_enq_rsp aq_rsp; 1107 int rc, word; 1108 1109 if (req->ctype != NIX_AQ_CTYPE_CQ) 1110 return 0; 1111 1112 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 1113 req->hdr.pcifunc, ctype, req->qidx); 1114 if (rc) { 1115 dev_err(rvu->dev, 1116 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", 1117 __func__, nix_get_ctx_name(ctype), req->qidx, 1118 req->hdr.pcifunc); 1119 return rc; 1120 } 1121 1122 /* Make copy of original context & mask which are required 1123 * for resubmission 1124 */ 1125 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); 1126 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); 1127 1128 /* exclude fields which HW can update */ 1129 aq_req.cq_mask.cq_err = 0; 1130 aq_req.cq_mask.wrptr = 0; 1131 aq_req.cq_mask.tail = 0; 1132 aq_req.cq_mask.head = 0; 1133 aq_req.cq_mask.avg_level = 0; 1134 aq_req.cq_mask.update_time = 0; 1135 aq_req.cq_mask.substream = 0; 1136 1137 /* Context mask (cq_mask) holds mask value of fields which 1138 * are changed in AQ WRITE operation. 1139 * for example cq.drop = 0xa; 1140 * cq_mask.drop = 0xff; 1141 * Below logic performs '&' between cq and cq_mask so that non 1142 * updated fields are masked out for request and response 1143 * comparison 1144 */ 1145 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); 1146 word++) { 1147 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= 1148 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1149 *(u64 *)((u8 *)&aq_req.cq + word * 8) &= 1150 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1151 } 1152 1153 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) 1154 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; 1155 1156 return 0; 1157 } 1158 1159 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1160 struct nix_aq_enq_rsp *rsp) 1161 { 1162 struct nix_hw *nix_hw; 1163 int err, retries = 5; 1164 int blkaddr; 1165 1166 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 1167 if (blkaddr < 0) 1168 return NIX_AF_ERR_AF_LF_INVALID; 1169 1170 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1171 if (!nix_hw) 1172 return NIX_AF_ERR_INVALID_NIXBLK; 1173 1174 retry: 1175 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 1176 1177 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' 1178 * As a work around perfrom CQ context read after each AQ write. If AQ 1179 * read shows AQ write is not updated perform AQ write again. 
1180 */ 1181 if (!err && req->op == NIX_AQ_INSTOP_WRITE) { 1182 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); 1183 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { 1184 if (retries--) 1185 goto retry; 1186 else 1187 return NIX_AF_ERR_CQ_CTX_WRITE_ERR; 1188 } 1189 } 1190 1191 return err; 1192 } 1193 1194 static const char *nix_get_ctx_name(int ctype) 1195 { 1196 switch (ctype) { 1197 case NIX_AQ_CTYPE_CQ: 1198 return "CQ"; 1199 case NIX_AQ_CTYPE_SQ: 1200 return "SQ"; 1201 case NIX_AQ_CTYPE_RQ: 1202 return "RQ"; 1203 case NIX_AQ_CTYPE_RSS: 1204 return "RSS"; 1205 } 1206 return ""; 1207 } 1208 1209 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 1210 { 1211 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1212 struct nix_aq_enq_req aq_req; 1213 unsigned long *bmap; 1214 int qidx, q_cnt = 0; 1215 int err = 0, rc; 1216 1217 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 1218 return NIX_AF_ERR_AQ_ENQUEUE; 1219 1220 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1221 aq_req.hdr.pcifunc = req->hdr.pcifunc; 1222 1223 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1224 aq_req.cq.ena = 0; 1225 aq_req.cq_mask.ena = 1; 1226 aq_req.cq.bp_ena = 0; 1227 aq_req.cq_mask.bp_ena = 1; 1228 q_cnt = pfvf->cq_ctx->qsize; 1229 bmap = pfvf->cq_bmap; 1230 } 1231 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1232 aq_req.sq.ena = 0; 1233 aq_req.sq_mask.ena = 1; 1234 q_cnt = pfvf->sq_ctx->qsize; 1235 bmap = pfvf->sq_bmap; 1236 } 1237 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1238 aq_req.rq.ena = 0; 1239 aq_req.rq_mask.ena = 1; 1240 q_cnt = pfvf->rq_ctx->qsize; 1241 bmap = pfvf->rq_bmap; 1242 } 1243 1244 aq_req.ctype = req->ctype; 1245 aq_req.op = NIX_AQ_INSTOP_WRITE; 1246 1247 for (qidx = 0; qidx < q_cnt; qidx++) { 1248 if (!test_bit(qidx, bmap)) 1249 continue; 1250 aq_req.qidx = qidx; 1251 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 1252 if (rc) { 1253 err = rc; 1254 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1255 nix_get_ctx_name(req->ctype), qidx); 1256 } 1257 } 1258 1259 return err; 1260 } 1261 1262 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1263 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1264 { 1265 struct nix_aq_enq_req lock_ctx_req; 1266 int err; 1267 1268 if (req->op != NIX_AQ_INSTOP_INIT) 1269 return 0; 1270 1271 if (req->ctype == NIX_AQ_CTYPE_MCE || 1272 req->ctype == NIX_AQ_CTYPE_DYNO) 1273 return 0; 1274 1275 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1276 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1277 lock_ctx_req.ctype = req->ctype; 1278 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1279 lock_ctx_req.qidx = req->qidx; 1280 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1281 if (err) 1282 dev_err(rvu->dev, 1283 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1284 req->hdr.pcifunc, 1285 nix_get_ctx_name(req->ctype), req->qidx); 1286 return err; 1287 } 1288 1289 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1290 struct nix_aq_enq_req *req, 1291 struct nix_aq_enq_rsp *rsp) 1292 { 1293 int err; 1294 1295 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1296 if (!err) 1297 err = nix_lf_hwctx_lockdown(rvu, req); 1298 return err; 1299 } 1300 #else 1301 1302 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1303 struct nix_aq_enq_req *req, 1304 struct nix_aq_enq_rsp *rsp) 1305 { 1306 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1307 } 1308 #endif 1309 /* CN10K mbox handler */ 1310 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1311 struct nix_cn10k_aq_enq_req *req, 1312 struct nix_cn10k_aq_enq_rsp *rsp) 1313 { 
1314 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1315 (struct nix_aq_enq_rsp *)rsp); 1316 } 1317 1318 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1319 struct hwctx_disable_req *req, 1320 struct msg_rsp *rsp) 1321 { 1322 return nix_lf_hwctx_disable(rvu, req); 1323 } 1324 1325 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1326 struct nix_lf_alloc_req *req, 1327 struct nix_lf_alloc_rsp *rsp) 1328 { 1329 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1330 struct rvu_hwinfo *hw = rvu->hw; 1331 u16 pcifunc = req->hdr.pcifunc; 1332 struct rvu_block *block; 1333 struct rvu_pfvf *pfvf; 1334 u64 cfg, ctx_cfg; 1335 int blkaddr; 1336 1337 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1338 return NIX_AF_ERR_PARAM; 1339 1340 if (req->way_mask) 1341 req->way_mask &= 0xFFFF; 1342 1343 pfvf = rvu_get_pfvf(rvu, pcifunc); 1344 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1345 if (!pfvf->nixlf || blkaddr < 0) 1346 return NIX_AF_ERR_AF_LF_INVALID; 1347 1348 block = &hw->block[blkaddr]; 1349 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1350 if (nixlf < 0) 1351 return NIX_AF_ERR_AF_LF_INVALID; 1352 1353 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1354 if (req->npa_func) { 1355 /* If default, use 'this' NIXLF's PFFUNC */ 1356 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1357 req->npa_func = pcifunc; 1358 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1359 return NIX_AF_INVAL_NPA_PF_FUNC; 1360 } 1361 1362 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1363 if (req->sso_func) { 1364 /* If default, use 'this' NIXLF's PFFUNC */ 1365 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1366 req->sso_func = pcifunc; 1367 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1368 return NIX_AF_INVAL_SSO_PF_FUNC; 1369 } 1370 1371 /* If RSS is being enabled, check if requested config is valid. 1372 * RSS table size should be power of two, otherwise 1373 * RSS_GRP::OFFSET + adder might go beyond that group or 1374 * won't be able to use entire table. 
1375 */ 1376 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1377 !is_power_of_2(req->rss_sz))) 1378 return NIX_AF_ERR_RSS_SIZE_INVALID; 1379 1380 if (req->rss_sz && 1381 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1382 return NIX_AF_ERR_RSS_GRPS_INVALID; 1383 1384 /* Reset this NIX LF */ 1385 err = rvu_lf_reset(rvu, block, nixlf); 1386 if (err) { 1387 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1388 block->addr - BLKADDR_NIX0, nixlf); 1389 return NIX_AF_ERR_LF_RESET; 1390 } 1391 1392 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1393 1394 /* Alloc NIX RQ HW context memory and config the base */ 1395 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1396 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1397 if (err) 1398 goto free_mem; 1399 1400 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1401 if (!pfvf->rq_bmap) 1402 goto free_mem; 1403 1404 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1405 (u64)pfvf->rq_ctx->iova); 1406 1407 /* Set caching and queue count in HW */ 1408 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1409 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1410 1411 /* Alloc NIX SQ HW context memory and config the base */ 1412 hwctx_size = 1UL << (ctx_cfg & 0xF); 1413 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1414 if (err) 1415 goto free_mem; 1416 1417 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1418 if (!pfvf->sq_bmap) 1419 goto free_mem; 1420 1421 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1422 (u64)pfvf->sq_ctx->iova); 1423 1424 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1425 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1426 1427 /* Alloc NIX CQ HW context memory and config the base */ 1428 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1429 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1430 if (err) 1431 goto free_mem; 1432 1433 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1434 if (!pfvf->cq_bmap) 1435 goto free_mem; 1436 1437 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1438 (u64)pfvf->cq_ctx->iova); 1439 1440 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1441 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1442 1443 /* Initialize receive side scaling (RSS) */ 1444 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1445 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1446 req->rss_grps, hwctx_size, req->way_mask, 1447 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); 1448 if (err) 1449 goto free_mem; 1450 1451 /* Alloc memory for CQINT's HW contexts */ 1452 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1453 qints = (cfg >> 24) & 0xFFF; 1454 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1455 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1456 if (err) 1457 goto free_mem; 1458 1459 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1460 (u64)pfvf->cq_ints_ctx->iova); 1461 1462 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1463 BIT_ULL(36) | req->way_mask << 20); 1464 1465 /* Alloc memory for QINT's HW contexts */ 1466 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1467 qints = (cfg >> 12) & 0xFFF; 1468 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1469 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1470 if (err) 1471 goto free_mem; 1472 1473 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1474 (u64)pfvf->nix_qints_ctx->iova); 1475 rvu_write64(rvu, 
blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1476 BIT_ULL(36) | req->way_mask << 20); 1477 1478 /* Setup VLANX TPID's. 1479 * Use VLAN1 for 802.1Q 1480 * and VLAN0 for 802.1AD. 1481 */ 1482 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1483 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1484 1485 /* Enable LMTST for this NIX LF */ 1486 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1487 1488 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1489 if (req->npa_func) 1490 cfg = req->npa_func; 1491 if (req->sso_func) 1492 cfg |= (u64)req->sso_func << 16; 1493 1494 cfg |= (u64)req->xqe_sz << 33; 1495 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1496 1497 /* Config Rx pkt length, csum checks and apad enable / disable */ 1498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1499 1500 /* Configure pkind for TX parse config */ 1501 cfg = NPC_TX_DEF_PKIND; 1502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1503 1504 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1505 if (is_sdp_pfvf(pcifunc)) 1506 intf = NIX_INTF_TYPE_SDP; 1507 1508 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, 1509 !!(req->flags & NIX_LF_LBK_BLK_SEL)); 1510 if (err) 1511 goto free_mem; 1512 1513 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1514 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1515 1516 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1517 rvu_write64(rvu, blkaddr, 1518 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1519 VTAGSIZE_T4 | VTAG_STRIP); 1520 1521 goto exit; 1522 1523 free_mem: 1524 nix_ctx_free(rvu, pfvf); 1525 rc = -ENOMEM; 1526 1527 exit: 1528 /* Set macaddr of this PF/VF */ 1529 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1530 1531 /* set SQB size info */ 1532 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1533 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1534 rsp->rx_chan_base = pfvf->rx_chan_base; 1535 rsp->tx_chan_base = pfvf->tx_chan_base; 1536 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1537 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1538 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1539 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1540 /* Get HW supported stat count */ 1541 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1542 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1543 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1544 /* Get count of CQ IRQs and error IRQs supported per LF */ 1545 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1546 rsp->qints = ((cfg >> 12) & 0xFFF); 1547 rsp->cints = ((cfg >> 24) & 0xFFF); 1548 rsp->cgx_links = hw->cgx_links; 1549 rsp->lbk_links = hw->lbk_links; 1550 rsp->sdp_links = hw->sdp_links; 1551 1552 return rc; 1553 } 1554 1555 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1556 struct msg_rsp *rsp) 1557 { 1558 struct rvu_hwinfo *hw = rvu->hw; 1559 u16 pcifunc = req->hdr.pcifunc; 1560 struct rvu_block *block; 1561 int blkaddr, nixlf, err; 1562 struct rvu_pfvf *pfvf; 1563 1564 pfvf = rvu_get_pfvf(rvu, pcifunc); 1565 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1566 if (!pfvf->nixlf || blkaddr < 0) 1567 return NIX_AF_ERR_AF_LF_INVALID; 1568 1569 block = &hw->block[blkaddr]; 1570 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1571 if (nixlf < 0) 1572 return NIX_AF_ERR_AF_LF_INVALID; 1573 1574 if (req->flags & NIX_LF_DISABLE_FLOWS) 1575 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1576 else 1577 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1578 1579 /* Free any tx vtag def entries used by this 
NIX LF */ 1580 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1581 nix_free_tx_vtag_entries(rvu, pcifunc); 1582 1583 nix_interface_deinit(rvu, pcifunc, nixlf); 1584 1585 /* Reset this NIX LF */ 1586 err = rvu_lf_reset(rvu, block, nixlf); 1587 if (err) { 1588 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1589 block->addr - BLKADDR_NIX0, nixlf); 1590 return NIX_AF_ERR_LF_RESET; 1591 } 1592 1593 nix_ctx_free(rvu, pfvf); 1594 1595 return 0; 1596 } 1597 1598 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1599 struct nix_mark_format_cfg *req, 1600 struct nix_mark_format_cfg_rsp *rsp) 1601 { 1602 u16 pcifunc = req->hdr.pcifunc; 1603 struct nix_hw *nix_hw; 1604 struct rvu_pfvf *pfvf; 1605 int blkaddr, rc; 1606 u32 cfg; 1607 1608 pfvf = rvu_get_pfvf(rvu, pcifunc); 1609 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1610 if (!pfvf->nixlf || blkaddr < 0) 1611 return NIX_AF_ERR_AF_LF_INVALID; 1612 1613 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1614 if (!nix_hw) 1615 return NIX_AF_ERR_INVALID_NIXBLK; 1616 1617 cfg = (((u32)req->offset & 0x7) << 16) | 1618 (((u32)req->y_mask & 0xF) << 12) | 1619 (((u32)req->y_val & 0xF) << 8) | 1620 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1621 1622 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1623 if (rc < 0) { 1624 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1625 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1626 return NIX_AF_ERR_MARK_CFG_FAIL; 1627 } 1628 1629 rsp->mark_format_idx = rc; 1630 return 0; 1631 } 1632 1633 /* Handle shaper update specially for few revisions */ 1634 static bool 1635 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, 1636 int lvl, u64 reg, u64 regval) 1637 { 1638 u64 regbase, oldval, sw_xoff = 0; 1639 u64 dbgval, md_debug0 = 0; 1640 unsigned long poll_tmo; 1641 bool rate_reg = 0; 1642 u32 schq; 1643 1644 regbase = reg & 0xFFFF; 1645 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1646 1647 /* Check for rate register */ 1648 switch (lvl) { 1649 case NIX_TXSCH_LVL_TL1: 1650 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); 1651 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); 1652 1653 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); 1654 break; 1655 case NIX_TXSCH_LVL_TL2: 1656 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); 1657 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); 1658 1659 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || 1660 regbase == NIX_AF_TL2X_PIR(0)); 1661 break; 1662 case NIX_TXSCH_LVL_TL3: 1663 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); 1664 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); 1665 1666 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || 1667 regbase == NIX_AF_TL3X_PIR(0)); 1668 break; 1669 case NIX_TXSCH_LVL_TL4: 1670 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); 1671 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); 1672 1673 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || 1674 regbase == NIX_AF_TL4X_PIR(0)); 1675 break; 1676 case NIX_TXSCH_LVL_MDQ: 1677 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); 1678 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || 1679 regbase == NIX_AF_MDQX_PIR(0)); 1680 break; 1681 } 1682 1683 if (!rate_reg) 1684 return false; 1685 1686 /* Nothing special to do when state is not toggled */ 1687 oldval = rvu_read64(rvu, blkaddr, reg); 1688 if ((oldval & 0x1) == (regval & 0x1)) { 1689 rvu_write64(rvu, blkaddr, reg, regval); 1690 return true; 1691 } 1692 1693 /* PIR/CIR disable */ 1694 if (!(regval & 0x1)) { 1695 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1696 rvu_write64(rvu, blkaddr, reg, 0); 1697 udelay(4); 1698 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1699 return true; 1700 } 1701 1702 /* PIR/CIR 
enable */ 1703 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1704 if (md_debug0) { 1705 poll_tmo = jiffies + usecs_to_jiffies(10000); 1706 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ 1707 do { 1708 if (time_after(jiffies, poll_tmo)) { 1709 dev_err(rvu->dev, 1710 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", 1711 nixlf, schq, lvl); 1712 goto exit; 1713 } 1714 usleep_range(1, 5); 1715 dbgval = rvu_read64(rvu, blkaddr, md_debug0); 1716 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); 1717 } 1718 rvu_write64(rvu, blkaddr, reg, regval); 1719 exit: 1720 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1721 return true; 1722 } 1723 1724 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr, 1725 int lvl, int schq) 1726 { 1727 u64 tlx_parent = 0, tlx_schedule = 0; 1728 1729 switch (lvl) { 1730 case NIX_TXSCH_LVL_TL2: 1731 tlx_parent = NIX_AF_TL2X_PARENT(schq); 1732 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); 1733 break; 1734 case NIX_TXSCH_LVL_TL3: 1735 tlx_parent = NIX_AF_TL3X_PARENT(schq); 1736 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); 1737 break; 1738 case NIX_TXSCH_LVL_TL4: 1739 tlx_parent = NIX_AF_TL4X_PARENT(schq); 1740 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); 1741 break; 1742 case NIX_TXSCH_LVL_MDQ: 1743 /* no need to reset SMQ_CFG as HW clears this CSR 1744 * on SMQ flush 1745 */ 1746 tlx_parent = NIX_AF_MDQX_PARENT(schq); 1747 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); 1748 break; 1749 default: 1750 return; 1751 } 1752 1753 if (tlx_parent) 1754 rvu_write64(rvu, blkaddr, tlx_parent, 0x0); 1755 1756 if (tlx_schedule) 1757 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); 1758 } 1759 1760 /* Disable shaping of pkts by a scheduler queue 1761 * at a given scheduler level. 1762 */ 1763 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1764 int nixlf, int lvl, int schq) 1765 { 1766 struct rvu_hwinfo *hw = rvu->hw; 1767 u64 cir_reg = 0, pir_reg = 0; 1768 u64 cfg; 1769 1770 switch (lvl) { 1771 case NIX_TXSCH_LVL_TL1: 1772 cir_reg = NIX_AF_TL1X_CIR(schq); 1773 pir_reg = 0; /* PIR not available at TL1 */ 1774 break; 1775 case NIX_TXSCH_LVL_TL2: 1776 cir_reg = NIX_AF_TL2X_CIR(schq); 1777 pir_reg = NIX_AF_TL2X_PIR(schq); 1778 break; 1779 case NIX_TXSCH_LVL_TL3: 1780 cir_reg = NIX_AF_TL3X_CIR(schq); 1781 pir_reg = NIX_AF_TL3X_PIR(schq); 1782 break; 1783 case NIX_TXSCH_LVL_TL4: 1784 cir_reg = NIX_AF_TL4X_CIR(schq); 1785 pir_reg = NIX_AF_TL4X_PIR(schq); 1786 break; 1787 case NIX_TXSCH_LVL_MDQ: 1788 cir_reg = NIX_AF_MDQX_CIR(schq); 1789 pir_reg = NIX_AF_MDQX_PIR(schq); 1790 break; 1791 } 1792 1793 /* Shaper state toggle needs wait/poll */ 1794 if (hw->cap.nix_shaper_toggle_wait) { 1795 if (cir_reg) 1796 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1797 lvl, cir_reg, 0); 1798 if (pir_reg) 1799 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1800 lvl, pir_reg, 0); 1801 return; 1802 } 1803 1804 if (!cir_reg) 1805 return; 1806 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1807 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1808 1809 if (!pir_reg) 1810 return; 1811 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1812 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1813 } 1814 1815 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1816 int lvl, int schq) 1817 { 1818 struct rvu_hwinfo *hw = rvu->hw; 1819 int link_level; 1820 int link; 1821 1822 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1823 return; 1824 1825 /* Reset TL4's SDP link config */ 1826 if (lvl == NIX_TXSCH_LVL_TL4) 1827 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1828 1829 link_level = 
rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1830 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1831 if (lvl != link_level) 1832 return; 1833 1834 /* Reset TL2's CGX or LBK link config */ 1835 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1836 rvu_write64(rvu, blkaddr, 1837 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1838 } 1839 1840 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 1841 int lvl, int schq) 1842 { 1843 struct rvu_hwinfo *hw = rvu->hw; 1844 u64 reg; 1845 1846 /* Skip this if shaping is not supported */ 1847 if (!hw->cap.nix_shaping) 1848 return; 1849 1850 /* Clear level specific SW_XOFF */ 1851 switch (lvl) { 1852 case NIX_TXSCH_LVL_TL1: 1853 reg = NIX_AF_TL1X_SW_XOFF(schq); 1854 break; 1855 case NIX_TXSCH_LVL_TL2: 1856 reg = NIX_AF_TL2X_SW_XOFF(schq); 1857 break; 1858 case NIX_TXSCH_LVL_TL3: 1859 reg = NIX_AF_TL3X_SW_XOFF(schq); 1860 break; 1861 case NIX_TXSCH_LVL_TL4: 1862 reg = NIX_AF_TL4X_SW_XOFF(schq); 1863 break; 1864 case NIX_TXSCH_LVL_MDQ: 1865 reg = NIX_AF_MDQX_SW_XOFF(schq); 1866 break; 1867 default: 1868 return; 1869 } 1870 1871 rvu_write64(rvu, blkaddr, reg, 0x0); 1872 } 1873 1874 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1875 { 1876 struct rvu_hwinfo *hw = rvu->hw; 1877 int pf = rvu_get_pf(pcifunc); 1878 u8 cgx_id = 0, lmac_id = 0; 1879 1880 if (is_afvf(pcifunc)) {/* LBK links */ 1881 return hw->cgx_links; 1882 } else if (is_pf_cgxmapped(rvu, pf)) { 1883 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1884 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1885 } 1886 1887 /* SDP link */ 1888 return hw->cgx_links + hw->lbk_links; 1889 } 1890 1891 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1892 int link, int *start, int *end) 1893 { 1894 struct rvu_hwinfo *hw = rvu->hw; 1895 int pf = rvu_get_pf(pcifunc); 1896 1897 if (is_afvf(pcifunc)) { /* LBK links */ 1898 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1899 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1900 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1901 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1902 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1903 } else { /* SDP link */ 1904 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1905 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1906 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1907 } 1908 } 1909 1910 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1911 struct nix_hw *nix_hw, 1912 struct nix_txsch_alloc_req *req) 1913 { 1914 struct rvu_hwinfo *hw = rvu->hw; 1915 int schq, req_schq, free_cnt; 1916 struct nix_txsch *txsch; 1917 int link, start, end; 1918 1919 txsch = &nix_hw->txsch[lvl]; 1920 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1921 1922 if (!req_schq) 1923 return 0; 1924 1925 link = nix_get_tx_link(rvu, pcifunc); 1926 1927 /* For traffic aggregating scheduler level, one queue is enough */ 1928 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1929 if (req_schq != 1) 1930 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1931 return 0; 1932 } 1933 1934 /* Get free SCHQ count and check if request can be accomodated */ 1935 if (hw->cap.nix_fixed_txschq_mapping) { 1936 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1937 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1938 if (end <= txsch->schq.max && schq < end && 1939 !test_bit(schq, txsch->schq.bmap)) 1940 free_cnt = 1; 1941 else 1942 free_cnt = 0; 1943 } else { 1944 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1945 } 1946 1947 if (free_cnt < req_schq || 
req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || 1948 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) 1949 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1950 1951 /* If contiguous queues are needed, check for availability */ 1952 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1953 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1954 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1955 1956 return 0; 1957 } 1958 1959 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1960 struct nix_txsch_alloc_rsp *rsp, 1961 int lvl, int start, int end) 1962 { 1963 struct rvu_hwinfo *hw = rvu->hw; 1964 u16 pcifunc = rsp->hdr.pcifunc; 1965 int idx, schq; 1966 1967 /* For traffic aggregating levels, queue alloc is based 1968 * on transmit link to which PF_FUNC is mapped. 1969 */ 1970 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1971 /* A single TL queue is allocated */ 1972 if (rsp->schq_contig[lvl]) { 1973 rsp->schq_contig[lvl] = 1; 1974 rsp->schq_contig_list[lvl][0] = start; 1975 } 1976 1977 /* Both contig and non-contig reqs don't make sense here */ 1978 if (rsp->schq_contig[lvl]) 1979 rsp->schq[lvl] = 0; 1980 1981 if (rsp->schq[lvl]) { 1982 rsp->schq[lvl] = 1; 1983 rsp->schq_list[lvl][0] = start; 1984 } 1985 return; 1986 } 1987 1988 /* Adjust the queue request count if HW supports 1989 * only one queue per level configuration. 1990 */ 1991 if (hw->cap.nix_fixed_txschq_mapping) { 1992 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1993 schq = start + idx; 1994 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1995 rsp->schq_contig[lvl] = 0; 1996 rsp->schq[lvl] = 0; 1997 return; 1998 } 1999 2000 if (rsp->schq_contig[lvl]) { 2001 rsp->schq_contig[lvl] = 1; 2002 set_bit(schq, txsch->schq.bmap); 2003 rsp->schq_contig_list[lvl][0] = schq; 2004 rsp->schq[lvl] = 0; 2005 } else if (rsp->schq[lvl]) { 2006 rsp->schq[lvl] = 1; 2007 set_bit(schq, txsch->schq.bmap); 2008 rsp->schq_list[lvl][0] = schq; 2009 } 2010 return; 2011 } 2012 2013 /* Allocate contiguous queue indices requested first */ 2014 if (rsp->schq_contig[lvl]) { 2015 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 2016 txsch->schq.max, start, 2017 rsp->schq_contig[lvl], 0); 2018 if (schq >= end) 2019 rsp->schq_contig[lvl] = 0; 2020 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 2021 set_bit(schq, txsch->schq.bmap); 2022 rsp->schq_contig_list[lvl][idx] = schq; 2023 schq++; 2024 } 2025 } 2026 2027 /* Allocate non-contiguous queue indices */ 2028 if (rsp->schq[lvl]) { 2029 idx = 0; 2030 for (schq = start; schq < end; schq++) { 2031 if (!test_bit(schq, txsch->schq.bmap)) { 2032 set_bit(schq, txsch->schq.bmap); 2033 rsp->schq_list[lvl][idx++] = schq; 2034 } 2035 if (idx == rsp->schq[lvl]) 2036 break; 2037 } 2038 /* Update how many were allocated */ 2039 rsp->schq[lvl] = idx; 2040 } 2041 } 2042 2043 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 2044 struct nix_txsch_alloc_req *req, 2045 struct nix_txsch_alloc_rsp *rsp) 2046 { 2047 struct rvu_hwinfo *hw = rvu->hw; 2048 u16 pcifunc = req->hdr.pcifunc; 2049 int link, blkaddr, rc = 0; 2050 int lvl, idx, start, end; 2051 struct nix_txsch *txsch; 2052 struct nix_hw *nix_hw; 2053 u32 *pfvf_map; 2054 int nixlf; 2055 u16 schq; 2056 2057 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2058 if (rc) 2059 return rc; 2060 2061 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2062 if (!nix_hw) 2063 return NIX_AF_ERR_INVALID_NIXBLK; 2064 2065 mutex_lock(&rvu->rsrc_lock); 2066 2067 /* Check if request is valid as per HW capabilities 2068 * and can be accommodated.
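 * All levels are validated up front so that no queues get carved out
 * if any level of the request cannot be satisfied.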
2069 */ 2070 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2071 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2072 if (rc) 2073 goto err; 2074 } 2075 2076 /* Allocate requested Tx scheduler queues */ 2077 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2078 txsch = &nix_hw->txsch[lvl]; 2079 pfvf_map = txsch->pfvf_map; 2080 2081 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2082 continue; 2083 2084 rsp->schq[lvl] = req->schq[lvl]; 2085 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2086 2087 link = nix_get_tx_link(rvu, pcifunc); 2088 2089 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2090 start = link; 2091 end = link; 2092 } else if (hw->cap.nix_fixed_txschq_mapping) { 2093 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2094 } else { 2095 start = 0; 2096 end = txsch->schq.max; 2097 } 2098 2099 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2100 2101 /* Reset queue config */ 2102 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2103 schq = rsp->schq_contig_list[lvl][idx]; 2104 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2105 NIX_TXSCHQ_CFG_DONE)) 2106 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2107 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2108 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2109 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2110 } 2111 2112 for (idx = 0; idx < req->schq[lvl]; idx++) { 2113 schq = rsp->schq_list[lvl][idx]; 2114 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2115 NIX_TXSCHQ_CFG_DONE)) 2116 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2117 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2118 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2119 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2120 } 2121 } 2122 2123 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2124 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2125 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2126 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2127 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2128 goto exit; 2129 err: 2130 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2131 exit: 2132 mutex_unlock(&rvu->rsrc_lock); 2133 return rc; 2134 } 2135 2136 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2137 struct nix_smq_flush_ctx *smq_flush_ctx) 2138 { 2139 struct nix_smq_tree_ctx *smq_tree_ctx; 2140 u64 parent_off, regval; 2141 u16 schq; 2142 int lvl; 2143 2144 smq_flush_ctx->smq = smq; 2145 2146 schq = smq; 2147 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2148 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2149 smq_tree_ctx->schq = schq; 2150 if (lvl == NIX_TXSCH_LVL_TL1) { 2151 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2152 smq_tree_ctx->pir_off = 0; 2153 smq_tree_ctx->pir_val = 0; 2154 parent_off = 0; 2155 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2156 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2157 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2158 parent_off = NIX_AF_TL2X_PARENT(schq); 2159 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2160 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2161 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2162 parent_off = NIX_AF_TL3X_PARENT(schq); 2163 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2164 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2165 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2166 parent_off = NIX_AF_TL4X_PARENT(schq); 2167 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2168 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2169 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2170 parent_off = NIX_AF_MDQX_PARENT(schq); 2171 } 2172 /* save cir/pir register values */ 2173 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2174 if (smq_tree_ctx->pir_off) 2175 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2176 2177 /* get parent txsch node */ 2178 if (parent_off) { 2179 regval = rvu_read64(rvu, blkaddr, parent_off); 2180 schq = (regval >> 16) & 0x1FF; 2181 } 2182 } 2183 } 2184 2185 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2186 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2187 { 2188 struct nix_txsch *txsch; 2189 struct nix_hw *nix_hw; 2190 int tl2, tl2_schq; 2191 u64 regoff; 2192 2193 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2194 if (!nix_hw) 2195 return; 2196 2197 /* loop through all TL2s with matching PF_FUNC */ 2198 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2199 tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; 2200 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2201 /* skip the smq(flush) TL2 */ 2202 if (tl2 == tl2_schq) 2203 continue; 2204 /* skip unused TL2s */ 2205 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2206 continue; 2207 /* skip if PF_FUNC doesn't match */ 2208 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2209 (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & 2210 ~RVU_PFVF_FUNC_MASK))) 2211 continue; 2212 /* enable/disable XOFF */ 2213 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2214 if (enable) 2215 rvu_write64(rvu, blkaddr, regoff, 0x1); 2216 else 2217 rvu_write64(rvu, blkaddr, regoff, 0x0); 2218 } 2219 } 2220 2221 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2222 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2223 { 2224 u64 cir_off, pir_off, cir_val, pir_val; 2225 struct nix_smq_tree_ctx *smq_tree_ctx; 2226 int lvl; 2227 2228 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2229 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2230 cir_off = smq_tree_ctx->cir_off; 2231 cir_val = 
smq_tree_ctx->cir_val; 2232 pir_off = smq_tree_ctx->pir_off; 2233 pir_val = smq_tree_ctx->pir_val; 2234 2235 if (enable) { 2236 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2237 if (lvl != NIX_TXSCH_LVL_TL1) 2238 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2239 } else { 2240 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2241 if (lvl != NIX_TXSCH_LVL_TL1) 2242 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2243 } 2244 } 2245 } 2246 2247 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2248 int smq, u16 pcifunc, int nixlf) 2249 { 2250 struct nix_smq_flush_ctx *smq_flush_ctx; 2251 int err, restore_tx_en = 0, i; 2252 int pf = rvu_get_pf(pcifunc); 2253 u8 cgx_id = 0, lmac_id = 0; 2254 u16 tl2_tl3_link_schq; 2255 u8 link, link_level; 2256 u64 cfg, bmap = 0; 2257 2258 if (!is_rvu_otx2(rvu)) { 2259 /* Skip SMQ flush if pkt count is zero */ 2260 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2261 if (!cfg) 2262 return 0; 2263 } 2264 2265 /* enable cgx tx if disabled */ 2266 if (is_pf_cgxmapped(rvu, pf)) { 2267 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2268 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2269 lmac_id, true); 2270 } 2271 2272 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2273 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2274 if (!smq_flush_ctx) 2275 return -ENOMEM; 2276 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2277 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2278 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2279 2280 /* Disable backpressure from physical link, 2281 * otherwise SMQ flush may stall. 2282 */ 2283 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2284 2285 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2286 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2287 tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; 2288 link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; 2289 2290 /* SMQ set enqueue xoff */ 2291 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2292 cfg |= BIT_ULL(50); 2293 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2294 2295 /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ 2296 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2297 cfg = rvu_read64(rvu, blkaddr, 2298 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2299 if (!(cfg & BIT_ULL(12))) 2300 continue; 2301 bmap |= BIT_ULL(i); 2302 cfg &= ~BIT_ULL(12); 2303 rvu_write64(rvu, blkaddr, 2304 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2305 } 2306 2307 /* Do SMQ flush and set enqueue xoff */ 2308 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2309 cfg |= BIT_ULL(50) | BIT_ULL(49); 2310 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2311 2312 /* Wait for flush to complete */ 2313 err = rvu_poll_reg(rvu, blkaddr, 2314 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2315 if (err) 2316 dev_info(rvu->dev, 2317 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2318 nixlf, smq); 2319 2320 /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ 2321 for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { 2322 if (!(bmap & BIT_ULL(i))) 2323 continue; 2324 cfg = rvu_read64(rvu, blkaddr, 2325 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); 2326 cfg |= BIT_ULL(12); 2327 rvu_write64(rvu, blkaddr, 2328 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); 2329 } 2330 2331 /* clear XOFF on TL2s */ 2332 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2333 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2334 kfree(smq_flush_ctx); 2335 2336 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2337 /* restore cgx tx state */ 2338 if (restore_tx_en) 2339 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2340 return err; 2341 } 2342 2343 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2344 { 2345 int blkaddr, nixlf, lvl, schq, err; 2346 struct rvu_hwinfo *hw = rvu->hw; 2347 struct nix_txsch *txsch; 2348 struct nix_hw *nix_hw; 2349 u16 map_func; 2350 2351 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2352 if (blkaddr < 0) 2353 return NIX_AF_ERR_AF_LF_INVALID; 2354 2355 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2356 if (!nix_hw) 2357 return NIX_AF_ERR_INVALID_NIXBLK; 2358 2359 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2360 if (nixlf < 0) 2361 return NIX_AF_ERR_AF_LF_INVALID; 2362 2363 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2364 mutex_lock(&rvu->rsrc_lock); 2365 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2366 txsch = &nix_hw->txsch[lvl]; 2367 2368 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2369 continue; 2370 2371 for (schq = 0; schq < txsch->schq.max; schq++) { 2372 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2373 continue; 2374 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2375 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2376 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2377 } 2378 } 2379 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2380 nix_get_tx_link(rvu, pcifunc)); 2381 2382 /* On PF cleanup, clear cfg done flag as 2383 * PF would have changed default config. 
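 * Clearing NIX_TXSCHQ_CFG_DONE lets nix_tl1_default_cfg() reapply the
 * default TL1 settings for the next user of this queue.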
2384 */ 2385 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2386 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2387 schq = nix_get_tx_link(rvu, pcifunc); 2388 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2389 * VF might be using this TL1 queue 2390 */ 2391 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2392 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2393 } 2394 2395 /* Flush SMQs */ 2396 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2397 for (schq = 0; schq < txsch->schq.max; schq++) { 2398 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2399 continue; 2400 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2401 } 2402 2403 /* Now free scheduler queues to free pool */ 2404 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2405 /* TLs above aggregation level are shared across all PF 2406 * and it's VFs, hence skip freeing them. 2407 */ 2408 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2409 continue; 2410 2411 txsch = &nix_hw->txsch[lvl]; 2412 for (schq = 0; schq < txsch->schq.max; schq++) { 2413 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2414 continue; 2415 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2416 rvu_free_rsrc(&txsch->schq, schq); 2417 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2418 } 2419 } 2420 mutex_unlock(&rvu->rsrc_lock); 2421 2422 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 2423 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 2424 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 2425 if (err) 2426 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2427 2428 return 0; 2429 } 2430 2431 static int nix_txschq_free_one(struct rvu *rvu, 2432 struct nix_txsch_free_req *req) 2433 { 2434 struct rvu_hwinfo *hw = rvu->hw; 2435 u16 pcifunc = req->hdr.pcifunc; 2436 int lvl, schq, nixlf, blkaddr; 2437 struct nix_txsch *txsch; 2438 struct nix_hw *nix_hw; 2439 u32 *pfvf_map; 2440 int rc; 2441 2442 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2443 if (blkaddr < 0) 2444 return NIX_AF_ERR_AF_LF_INVALID; 2445 2446 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2447 if (!nix_hw) 2448 return NIX_AF_ERR_INVALID_NIXBLK; 2449 2450 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2451 if (nixlf < 0) 2452 return NIX_AF_ERR_AF_LF_INVALID; 2453 2454 lvl = req->schq_lvl; 2455 schq = req->schq; 2456 txsch = &nix_hw->txsch[lvl]; 2457 2458 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2459 return 0; 2460 2461 pfvf_map = txsch->pfvf_map; 2462 mutex_lock(&rvu->rsrc_lock); 2463 2464 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2465 rc = NIX_AF_ERR_TLX_INVALID; 2466 goto err; 2467 } 2468 2469 /* Clear SW_XOFF of this resource only. 2470 * For SMQ level, all path XOFF's 2471 * need to be made clear by user 2472 */ 2473 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2474 2475 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2476 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2477 2478 /* Flush if it is a SMQ. 
Onus of disabling 2479 * TL2/3 queue links before SMQ flush is on user 2480 */ 2481 if (lvl == NIX_TXSCH_LVL_SMQ && 2482 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2483 rc = NIX_AF_SMQ_FLUSH_FAILED; 2484 goto err; 2485 } 2486 2487 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2488 2489 /* Free the resource */ 2490 rvu_free_rsrc(&txsch->schq, schq); 2491 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2492 mutex_unlock(&rvu->rsrc_lock); 2493 return 0; 2494 err: 2495 mutex_unlock(&rvu->rsrc_lock); 2496 return rc; 2497 } 2498 2499 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2500 struct nix_txsch_free_req *req, 2501 struct msg_rsp *rsp) 2502 { 2503 if (req->flags & TXSCHQ_FREE_ALL) 2504 return nix_txschq_free(rvu, req->hdr.pcifunc); 2505 else 2506 return nix_txschq_free_one(rvu, req); 2507 } 2508 2509 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2510 int lvl, u64 reg, u64 regval) 2511 { 2512 u64 regbase = reg & 0xFFFF; 2513 u16 schq, parent; 2514 2515 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2516 return false; 2517 2518 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2519 /* Check if this schq belongs to this PF/VF or not */ 2520 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2521 return false; 2522 2523 parent = (regval >> 16) & 0x1FF; 2524 /* Validate MDQ's TL4 parent */ 2525 if (regbase == NIX_AF_MDQX_PARENT(0) && 2526 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2527 return false; 2528 2529 /* Validate TL4's TL3 parent */ 2530 if (regbase == NIX_AF_TL4X_PARENT(0) && 2531 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2532 return false; 2533 2534 /* Validate TL3's TL2 parent */ 2535 if (regbase == NIX_AF_TL3X_PARENT(0) && 2536 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2537 return false; 2538 2539 /* Validate TL2's TL1 parent */ 2540 if (regbase == NIX_AF_TL2X_PARENT(0) && 2541 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2542 return false; 2543 2544 return true; 2545 } 2546 2547 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2548 { 2549 u64 regbase; 2550 2551 if (hw->cap.nix_shaping) 2552 return true; 2553 2554 /* If shaping and coloring is not supported, then 2555 * *_CIR and *_PIR registers should not be configured. 
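 * Such writes are silently skipped by the caller instead of failing
 * the whole mailbox request.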
2556 */ 2557 regbase = reg & 0xFFFF; 2558 2559 switch (lvl) { 2560 case NIX_TXSCH_LVL_TL1: 2561 if (regbase == NIX_AF_TL1X_CIR(0)) 2562 return false; 2563 break; 2564 case NIX_TXSCH_LVL_TL2: 2565 if (regbase == NIX_AF_TL2X_CIR(0) || 2566 regbase == NIX_AF_TL2X_PIR(0)) 2567 return false; 2568 break; 2569 case NIX_TXSCH_LVL_TL3: 2570 if (regbase == NIX_AF_TL3X_CIR(0) || 2571 regbase == NIX_AF_TL3X_PIR(0)) 2572 return false; 2573 break; 2574 case NIX_TXSCH_LVL_TL4: 2575 if (regbase == NIX_AF_TL4X_CIR(0) || 2576 regbase == NIX_AF_TL4X_PIR(0)) 2577 return false; 2578 break; 2579 case NIX_TXSCH_LVL_MDQ: 2580 if (regbase == NIX_AF_MDQX_CIR(0) || 2581 regbase == NIX_AF_MDQX_PIR(0)) 2582 return false; 2583 break; 2584 } 2585 return true; 2586 } 2587 2588 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2589 u16 pcifunc, int blkaddr) 2590 { 2591 u32 *pfvf_map; 2592 int schq; 2593 2594 schq = nix_get_tx_link(rvu, pcifunc); 2595 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2596 /* Skip if PF has already done the config */ 2597 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2598 return; 2599 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2600 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2601 2602 /* On OcteonTx2 the config was in bytes and newer silcons 2603 * it's changed to weight. 2604 */ 2605 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2606 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2607 TXSCH_TL1_DFLT_RR_QTM); 2608 else 2609 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2610 CN10K_MAX_DWRR_WEIGHT); 2611 2612 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2613 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2614 } 2615 2616 /* Register offset - [15:0] 2617 * Scheduler Queue number - [25:16] 2618 */ 2619 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2620 2621 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2622 int blkaddr, struct nix_txschq_config *req, 2623 struct nix_txschq_config *rsp) 2624 { 2625 u16 pcifunc = req->hdr.pcifunc; 2626 int idx, schq; 2627 u64 reg; 2628 2629 for (idx = 0; idx < req->num_regs; idx++) { 2630 reg = req->reg[idx]; 2631 reg &= NIX_TX_SCHQ_MASK; 2632 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2633 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2634 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2635 return NIX_AF_INVAL_TXSCHQ_CFG; 2636 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2637 } 2638 rsp->lvl = req->lvl; 2639 rsp->num_regs = req->num_regs; 2640 return 0; 2641 } 2642 2643 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, 2644 struct nix_txsch *txsch, bool enable) 2645 { 2646 struct rvu_hwinfo *hw = rvu->hw; 2647 int lbk_link_start, lbk_links; 2648 u8 pf = rvu_get_pf(pcifunc); 2649 int schq; 2650 u64 cfg; 2651 2652 if (!is_pf_cgxmapped(rvu, pf)) 2653 return; 2654 2655 cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2656 lbk_link_start = hw->cgx_links; 2657 2658 for (schq = 0; schq < txsch->schq.max; schq++) { 2659 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2660 continue; 2661 /* Enable all LBK links with channel 63 by default so that 2662 * packets can be sent to LBK with a NPC TX MCAM rule 2663 */ 2664 lbk_links = hw->lbk_links; 2665 while (lbk_links--) 2666 rvu_write64(rvu, blkaddr, 2667 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2668 lbk_link_start + 2669 lbk_links), cfg); 2670 } 2671 } 2672 2673 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2674 struct nix_txschq_config *req, 2675 struct nix_txschq_config *rsp) 2676 { 2677 u64 reg, val, regval, schq_regbase, val_mask; 2678 struct rvu_hwinfo *hw = rvu->hw; 2679 u16 pcifunc = req->hdr.pcifunc; 2680 struct nix_txsch *txsch; 2681 struct nix_hw *nix_hw; 2682 int blkaddr, idx, err; 2683 int nixlf, schq; 2684 u32 *pfvf_map; 2685 2686 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2687 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2688 return NIX_AF_INVAL_TXSCHQ_CFG; 2689 2690 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2691 if (err) 2692 return err; 2693 2694 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2695 if (!nix_hw) 2696 return NIX_AF_ERR_INVALID_NIXBLK; 2697 2698 if (req->read) 2699 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2700 2701 txsch = &nix_hw->txsch[req->lvl]; 2702 pfvf_map = txsch->pfvf_map; 2703 2704 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2705 pcifunc & RVU_PFVF_FUNC_MASK) { 2706 mutex_lock(&rvu->rsrc_lock); 2707 if (req->lvl == NIX_TXSCH_LVL_TL1) 2708 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2709 mutex_unlock(&rvu->rsrc_lock); 2710 return 0; 2711 } 2712 2713 for (idx = 0; idx < req->num_regs; idx++) { 2714 reg = req->reg[idx]; 2715 reg &= NIX_TX_SCHQ_MASK; 2716 regval = req->regval[idx]; 2717 schq_regbase = reg & 0xFFFF; 2718 val_mask = req->regval_mask[idx]; 2719 2720 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2721 txsch->lvl, reg, regval)) 2722 return NIX_AF_INVAL_TXSCHQ_CFG; 2723 2724 /* Check if shaping and coloring is supported */ 2725 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2726 continue; 2727 2728 val = rvu_read64(rvu, blkaddr, reg); 2729 regval = (val & val_mask) | (regval & ~val_mask); 2730 2731 /* Handle shaping state toggle specially */ 2732 if (hw->cap.nix_shaper_toggle_wait && 2733 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2734 req->lvl, reg, regval)) 2735 continue; 2736 2737 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2738 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2739 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2740 pcifunc, 0); 2741 regval &= ~(0x7FULL << 24); 2742 regval |= ((u64)nixlf << 24); 2743 } 2744 2745 /* Clear 'BP_ENA' config, if it's not allowed */ 2746 if (!hw->cap.nix_tx_link_bp) { 2747 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2748 (schq_regbase & 0xFF00) == 2749 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2750 regval &= ~BIT_ULL(13); 2751 } 2752 2753 /* Mark config as done for TL1 by PF */ 2754 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2755 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2756 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2757 mutex_lock(&rvu->rsrc_lock); 2758 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2759 NIX_TXSCHQ_CFG_DONE); 2760 mutex_unlock(&rvu->rsrc_lock); 2761 } 2762 2763 /* SMQ flush is special hence split register writes such 2764 * that flush first and write rest of the bits later. 
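 * The flush request (bit 49) is serviced via nix_smq_flush() and then
 * masked out before the remaining bits are written below.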
2765 */ 2766 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2767 (regval & BIT_ULL(49))) { 2768 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2769 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2770 regval &= ~BIT_ULL(49); 2771 } 2772 rvu_write64(rvu, blkaddr, reg, regval); 2773 } 2774 2775 return 0; 2776 } 2777 2778 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2779 struct nix_vtag_config *req) 2780 { 2781 u64 regval = req->vtag_size; 2782 2783 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2784 req->vtag_size > VTAGSIZE_T8) 2785 return -EINVAL; 2786 2787 /* RX VTAG Type 7 reserved for vf vlan */ 2788 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2789 return NIX_AF_ERR_RX_VTAG_INUSE; 2790 2791 if (req->rx.capture_vtag) 2792 regval |= BIT_ULL(5); 2793 if (req->rx.strip_vtag) 2794 regval |= BIT_ULL(4); 2795 2796 rvu_write64(rvu, blkaddr, 2797 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2798 return 0; 2799 } 2800 2801 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2802 u16 pcifunc, int index) 2803 { 2804 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2805 struct nix_txvlan *vlan; 2806 2807 if (!nix_hw) 2808 return NIX_AF_ERR_INVALID_NIXBLK; 2809 2810 vlan = &nix_hw->txvlan; 2811 if (vlan->entry2pfvf_map[index] != pcifunc) 2812 return NIX_AF_ERR_PARAM; 2813 2814 rvu_write64(rvu, blkaddr, 2815 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2816 rvu_write64(rvu, blkaddr, 2817 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2818 2819 vlan->entry2pfvf_map[index] = 0; 2820 rvu_free_rsrc(&vlan->rsrc, index); 2821 2822 return 0; 2823 } 2824 2825 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2826 { 2827 struct nix_txvlan *vlan; 2828 struct nix_hw *nix_hw; 2829 int index, blkaddr; 2830 2831 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2832 if (blkaddr < 0) 2833 return; 2834 2835 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2836 if (!nix_hw) 2837 return; 2838 2839 vlan = &nix_hw->txvlan; 2840 2841 mutex_lock(&vlan->rsrc_lock); 2842 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2843 for (index = 0; index < vlan->rsrc.max; index++) { 2844 if (vlan->entry2pfvf_map[index] == pcifunc) 2845 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2846 } 2847 mutex_unlock(&vlan->rsrc_lock); 2848 } 2849 2850 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2851 u64 vtag, u8 size) 2852 { 2853 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2854 struct nix_txvlan *vlan; 2855 u64 regval; 2856 int index; 2857 2858 if (!nix_hw) 2859 return NIX_AF_ERR_INVALID_NIXBLK; 2860 2861 vlan = &nix_hw->txvlan; 2862 2863 mutex_lock(&vlan->rsrc_lock); 2864 2865 index = rvu_alloc_rsrc(&vlan->rsrc); 2866 if (index < 0) { 2867 mutex_unlock(&vlan->rsrc_lock); 2868 return index; 2869 } 2870 2871 mutex_unlock(&vlan->rsrc_lock); 2872 2873 regval = size ? 
vtag : vtag << 32; 2874 2875 rvu_write64(rvu, blkaddr, 2876 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2877 rvu_write64(rvu, blkaddr, 2878 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2879 2880 return index; 2881 } 2882 2883 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2884 struct nix_vtag_config *req) 2885 { 2886 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2887 u16 pcifunc = req->hdr.pcifunc; 2888 int idx0 = req->tx.vtag0_idx; 2889 int idx1 = req->tx.vtag1_idx; 2890 struct nix_txvlan *vlan; 2891 int err = 0; 2892 2893 if (!nix_hw) 2894 return NIX_AF_ERR_INVALID_NIXBLK; 2895 2896 vlan = &nix_hw->txvlan; 2897 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2898 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2899 vlan->entry2pfvf_map[idx1] != pcifunc) 2900 return NIX_AF_ERR_PARAM; 2901 2902 mutex_lock(&vlan->rsrc_lock); 2903 2904 if (req->tx.free_vtag0) { 2905 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2906 if (err) 2907 goto exit; 2908 } 2909 2910 if (req->tx.free_vtag1) 2911 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2912 2913 exit: 2914 mutex_unlock(&vlan->rsrc_lock); 2915 return err; 2916 } 2917 2918 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2919 struct nix_vtag_config *req, 2920 struct nix_vtag_config_rsp *rsp) 2921 { 2922 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2923 struct nix_txvlan *vlan; 2924 u16 pcifunc = req->hdr.pcifunc; 2925 2926 if (!nix_hw) 2927 return NIX_AF_ERR_INVALID_NIXBLK; 2928 2929 vlan = &nix_hw->txvlan; 2930 if (req->tx.cfg_vtag0) { 2931 rsp->vtag0_idx = 2932 nix_tx_vtag_alloc(rvu, blkaddr, 2933 req->tx.vtag0, req->vtag_size); 2934 2935 if (rsp->vtag0_idx < 0) 2936 return NIX_AF_ERR_TX_VTAG_NOSPC; 2937 2938 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2939 } 2940 2941 if (req->tx.cfg_vtag1) { 2942 rsp->vtag1_idx = 2943 nix_tx_vtag_alloc(rvu, blkaddr, 2944 req->tx.vtag1, req->vtag_size); 2945 2946 if (rsp->vtag1_idx < 0) 2947 goto err_free; 2948 2949 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2950 } 2951 2952 return 0; 2953 2954 err_free: 2955 if (req->tx.cfg_vtag0) 2956 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2957 2958 return NIX_AF_ERR_TX_VTAG_NOSPC; 2959 } 2960 2961 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2962 struct nix_vtag_config *req, 2963 struct nix_vtag_config_rsp *rsp) 2964 { 2965 u16 pcifunc = req->hdr.pcifunc; 2966 int blkaddr, nixlf, err; 2967 2968 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2969 if (err) 2970 return err; 2971 2972 if (req->cfg_type) { 2973 /* rx vtag configuration */ 2974 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2975 if (err) 2976 return NIX_AF_ERR_PARAM; 2977 } else { 2978 /* tx vtag configuration */ 2979 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2980 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2981 return NIX_AF_ERR_PARAM; 2982 2983 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2984 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2985 2986 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2987 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2988 } 2989 2990 return 0; 2991 } 2992 2993 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2994 int mce, u8 op, u16 pcifunc, int next, bool eol) 2995 { 2996 struct nix_aq_enq_req aq_req; 2997 int err; 2998 2999 aq_req.hdr.pcifunc = 0; 3000 aq_req.ctype = NIX_AQ_CTYPE_MCE; 3001 aq_req.op = op; 3002 aq_req.qidx = mce; 3003 3004 /* Use RSS with RSS index 0 */ 3005 aq_req.mce.op = 1; 3006 aq_req.mce.index = 0; 3007 aq_req.mce.eol = eol; 3008 aq_req.mce.pf_func = pcifunc; 
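/* Chain to the next MCE entry; 'eol' above marks the end of the replication list */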
3009 aq_req.mce.next = next; 3010 3011 /* All fields valid */ 3012 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 3013 3014 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 3015 if (err) { 3016 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 3017 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 3018 return err; 3019 } 3020 return 0; 3021 } 3022 3023 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 3024 u16 pcifunc, bool add) 3025 { 3026 struct mce *mce, *tail = NULL; 3027 bool delete = false; 3028 3029 /* Scan through the current list */ 3030 hlist_for_each_entry(mce, &mce_list->head, node) { 3031 /* If already exists, then delete */ 3032 if (mce->pcifunc == pcifunc && !add) { 3033 delete = true; 3034 break; 3035 } else if (mce->pcifunc == pcifunc && add) { 3036 /* entry already exists */ 3037 return 0; 3038 } 3039 tail = mce; 3040 } 3041 3042 if (delete) { 3043 hlist_del(&mce->node); 3044 kfree(mce); 3045 mce_list->count--; 3046 return 0; 3047 } 3048 3049 if (!add) 3050 return 0; 3051 3052 /* Add a new one to the list, at the tail */ 3053 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3054 if (!mce) 3055 return -ENOMEM; 3056 mce->pcifunc = pcifunc; 3057 if (!tail) 3058 hlist_add_head(&mce->node, &mce_list->head); 3059 else 3060 hlist_add_behind(&mce->node, &tail->node); 3061 mce_list->count++; 3062 return 0; 3063 } 3064 3065 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3066 struct nix_mce_list *mce_list, 3067 int mce_idx, int mcam_index, bool add) 3068 { 3069 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3070 struct npc_mcam *mcam = &rvu->hw->mcam; 3071 struct nix_mcast *mcast; 3072 struct nix_hw *nix_hw; 3073 struct mce *mce; 3074 3075 if (!mce_list) 3076 return -EINVAL; 3077 3078 /* Get this PF/VF func's MCE index */ 3079 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3080 3081 if (idx > (mce_idx + mce_list->max)) { 3082 dev_err(rvu->dev, 3083 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3084 __func__, idx, mce_list->max, 3085 pcifunc >> RVU_PFVF_PF_SHIFT); 3086 return -EINVAL; 3087 } 3088 3089 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3090 if (err) 3091 return err; 3092 3093 mcast = &nix_hw->mcast; 3094 mutex_lock(&mcast->mce_lock); 3095 3096 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3097 if (err) 3098 goto end; 3099 3100 /* Disable MCAM entry in NPC */ 3101 if (!mce_list->count) { 3102 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3103 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3104 goto end; 3105 } 3106 3107 /* Dump the updated list to HW */ 3108 idx = mce_idx; 3109 last_idx = idx + mce_list->count - 1; 3110 hlist_for_each_entry(mce, &mce_list->head, node) { 3111 if (idx > last_idx) 3112 break; 3113 3114 next_idx = idx + 1; 3115 /* EOL should be set in last MCE */ 3116 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3117 mce->pcifunc, next_idx, 3118 (next_idx > last_idx) ? 
true : false); 3119 if (err) 3120 goto end; 3121 idx++; 3122 } 3123 3124 end: 3125 mutex_unlock(&mcast->mce_lock); 3126 return err; 3127 } 3128 3129 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3130 struct nix_mce_list **mce_list, int *mce_idx) 3131 { 3132 struct rvu_hwinfo *hw = rvu->hw; 3133 struct rvu_pfvf *pfvf; 3134 3135 if (!hw->cap.nix_rx_multicast || 3136 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3137 *mce_list = NULL; 3138 *mce_idx = 0; 3139 return; 3140 } 3141 3142 /* Get this PF/VF func's MCE index */ 3143 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3144 3145 if (type == NIXLF_BCAST_ENTRY) { 3146 *mce_list = &pfvf->bcast_mce_list; 3147 *mce_idx = pfvf->bcast_mce_idx; 3148 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3149 *mce_list = &pfvf->mcast_mce_list; 3150 *mce_idx = pfvf->mcast_mce_idx; 3151 } else if (type == NIXLF_PROMISC_ENTRY) { 3152 *mce_list = &pfvf->promisc_mce_list; 3153 *mce_idx = pfvf->promisc_mce_idx; 3154 } else { 3155 *mce_list = NULL; 3156 *mce_idx = 0; 3157 } 3158 } 3159 3160 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3161 int type, bool add) 3162 { 3163 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3164 struct npc_mcam *mcam = &rvu->hw->mcam; 3165 struct rvu_hwinfo *hw = rvu->hw; 3166 struct nix_mce_list *mce_list; 3167 int pf; 3168 3169 /* skip multicast pkt replication for AF's VFs & SDP links */ 3170 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 3171 return 0; 3172 3173 if (!hw->cap.nix_rx_multicast) 3174 return 0; 3175 3176 pf = rvu_get_pf(pcifunc); 3177 if (!is_pf_cgxmapped(rvu, pf)) 3178 return 0; 3179 3180 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3181 if (blkaddr < 0) 3182 return -EINVAL; 3183 3184 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3185 if (nixlf < 0) 3186 return -EINVAL; 3187 3188 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3189 3190 mcam_index = npc_get_nixlf_mcam_index(mcam, 3191 pcifunc & ~RVU_PFVF_FUNC_MASK, 3192 nixlf, type); 3193 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3194 mce_idx, mcam_index, add); 3195 return err; 3196 } 3197 3198 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3199 { 3200 struct nix_mcast *mcast = &nix_hw->mcast; 3201 int err, pf, numvfs, idx; 3202 struct rvu_pfvf *pfvf; 3203 u16 pcifunc; 3204 u64 cfg; 3205 3206 /* Skip PF0 (i.e AF) */ 3207 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3208 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3209 /* If PF is not enabled, nothing to do */ 3210 if (!((cfg >> 20) & 0x01)) 3211 continue; 3212 /* Get numVFs attached to this PF */ 3213 numvfs = (cfg >> 12) & 0xFF; 3214 3215 pfvf = &rvu->pf[pf]; 3216 3217 /* This NIX0/1 block mapped to PF ? 
*/ 3218 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3219 continue; 3220 3221 /* save start idx of broadcast mce list */ 3222 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3223 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3224 3225 /* save start idx of multicast mce list */ 3226 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3227 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3228 3229 /* save the start idx of promisc mce list */ 3230 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3231 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3232 3233 for (idx = 0; idx < (numvfs + 1); idx++) { 3234 /* idx-0 is for PF, followed by VFs */ 3235 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3236 pcifunc |= idx; 3237 /* Add dummy entries now, so that we don't have to check 3238 * for whether AQ_OP should be INIT/WRITE later on. 3239 * Will be updated when a NIXLF is attached/detached to 3240 * these PF/VFs. 3241 */ 3242 err = nix_blk_setup_mce(rvu, nix_hw, 3243 pfvf->bcast_mce_idx + idx, 3244 NIX_AQ_INSTOP_INIT, 3245 pcifunc, 0, true); 3246 if (err) 3247 return err; 3248 3249 /* add dummy entries to multicast mce list */ 3250 err = nix_blk_setup_mce(rvu, nix_hw, 3251 pfvf->mcast_mce_idx + idx, 3252 NIX_AQ_INSTOP_INIT, 3253 pcifunc, 0, true); 3254 if (err) 3255 return err; 3256 3257 /* add dummy entries to promisc mce list */ 3258 err = nix_blk_setup_mce(rvu, nix_hw, 3259 pfvf->promisc_mce_idx + idx, 3260 NIX_AQ_INSTOP_INIT, 3261 pcifunc, 0, true); 3262 if (err) 3263 return err; 3264 } 3265 } 3266 return 0; 3267 } 3268 3269 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3270 { 3271 struct nix_mcast *mcast = &nix_hw->mcast; 3272 struct rvu_hwinfo *hw = rvu->hw; 3273 int err, size; 3274 3275 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3276 size = (1ULL << size); 3277 3278 /* Alloc memory for multicast/mirror replication entries */ 3279 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3280 (256UL << MC_TBL_SIZE), size); 3281 if (err) 3282 return -ENOMEM; 3283 3284 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3285 (u64)mcast->mce_ctx->iova); 3286 3287 /* Set max list length equal to max no of VFs per PF + PF itself */ 3288 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3289 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3290 3291 /* Alloc memory for multicast replication buffers */ 3292 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3293 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3294 (8UL << MC_BUF_CNT), size); 3295 if (err) 3296 return -ENOMEM; 3297 3298 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3299 (u64)mcast->mcast_buf->iova); 3300 3301 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3302 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3303 3304 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3305 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3306 BIT_ULL(20) | MC_BUF_CNT); 3307 3308 mutex_init(&mcast->mce_lock); 3309 3310 return nix_setup_mce_tables(rvu, nix_hw); 3311 } 3312 3313 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3314 { 3315 struct nix_txvlan *vlan = &nix_hw->txvlan; 3316 int err; 3317 3318 /* Allocate resource bimap for tx vtag def registers*/ 3319 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3320 err = rvu_alloc_bitmap(&vlan->rsrc); 3321 if (err) 3322 return -ENOMEM; 3323 3324 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3325 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, 
vlan->rsrc.max, 3326 sizeof(u16), GFP_KERNEL); 3327 if (!vlan->entry2pfvf_map) 3328 goto free_mem; 3329 3330 mutex_init(&vlan->rsrc_lock); 3331 return 0; 3332 3333 free_mem: 3334 kfree(vlan->rsrc.bmap); 3335 return -ENOMEM; 3336 } 3337 3338 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3339 { 3340 struct nix_txsch *txsch; 3341 int err, lvl, schq; 3342 u64 cfg, reg; 3343 3344 /* Get scheduler queue count of each type and alloc 3345 * bitmap for each for alloc/free/attach operations. 3346 */ 3347 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3348 txsch = &nix_hw->txsch[lvl]; 3349 txsch->lvl = lvl; 3350 switch (lvl) { 3351 case NIX_TXSCH_LVL_SMQ: 3352 reg = NIX_AF_MDQ_CONST; 3353 break; 3354 case NIX_TXSCH_LVL_TL4: 3355 reg = NIX_AF_TL4_CONST; 3356 break; 3357 case NIX_TXSCH_LVL_TL3: 3358 reg = NIX_AF_TL3_CONST; 3359 break; 3360 case NIX_TXSCH_LVL_TL2: 3361 reg = NIX_AF_TL2_CONST; 3362 break; 3363 case NIX_TXSCH_LVL_TL1: 3364 reg = NIX_AF_TL1_CONST; 3365 break; 3366 } 3367 cfg = rvu_read64(rvu, blkaddr, reg); 3368 txsch->schq.max = cfg & 0xFFFF; 3369 err = rvu_alloc_bitmap(&txsch->schq); 3370 if (err) 3371 return err; 3372 3373 /* Allocate memory for scheduler queues to 3374 * PF/VF pcifunc mapping info. 3375 */ 3376 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3377 sizeof(u32), GFP_KERNEL); 3378 if (!txsch->pfvf_map) 3379 return -ENOMEM; 3380 for (schq = 0; schq < txsch->schq.max; schq++) 3381 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3382 } 3383 3384 /* Setup a default value of 8192 as DWRR MTU */ 3385 if (rvu->hw->cap.nix_common_dwrr_mtu || 3386 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3387 rvu_write64(rvu, blkaddr, 3388 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3389 convert_bytes_to_dwrr_mtu(8192)); 3390 rvu_write64(rvu, blkaddr, 3391 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3392 convert_bytes_to_dwrr_mtu(8192)); 3393 rvu_write64(rvu, blkaddr, 3394 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3395 convert_bytes_to_dwrr_mtu(8192)); 3396 } 3397 3398 return 0; 3399 } 3400 3401 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3402 int blkaddr, u32 cfg) 3403 { 3404 int fmt_idx; 3405 3406 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3407 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3408 return fmt_idx; 3409 } 3410 if (fmt_idx >= nix_hw->mark_format.total) 3411 return -ERANGE; 3412 3413 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3414 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3415 nix_hw->mark_format.in_use++; 3416 return fmt_idx; 3417 } 3418 3419 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3420 int blkaddr) 3421 { 3422 u64 cfgs[] = { 3423 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3424 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3425 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3426 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3427 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3428 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3429 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3430 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3431 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3432 }; 3433 int i, rc; 3434 u64 total; 3435 3436 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3437 nix_hw->mark_format.total = (u8)total; 3438 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3439 GFP_KERNEL); 3440 if (!nix_hw->mark_format.cfg) 3441 return -ENOMEM; 3442 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3443 rc = 
rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3444 if (rc < 0) 3445 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3446 i, rc); 3447 } 3448 3449 return 0; 3450 } 3451 3452 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3453 { 3454 /* CN10K supports LBK FIFO size 72 KB */ 3455 if (rvu->hw->lbk_bufsize == 0x12000) 3456 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3457 else 3458 *max_mtu = NIC_HW_MAX_FRS; 3459 } 3460 3461 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3462 { 3463 int fifo_size = rvu_cgx_get_fifolen(rvu); 3464 3465 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3466 * FIFO len to accommodate 8 LMACS 3467 */ 3468 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3469 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3470 else 3471 *max_mtu = NIC_HW_MAX_FRS; 3472 } 3473 3474 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3475 struct nix_hw_info *rsp) 3476 { 3477 u16 pcifunc = req->hdr.pcifunc; 3478 u64 dwrr_mtu; 3479 int blkaddr; 3480 3481 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3482 if (blkaddr < 0) 3483 return NIX_AF_ERR_AF_LF_INVALID; 3484 3485 if (is_afvf(pcifunc)) 3486 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3487 else 3488 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3489 3490 rsp->min_mtu = NIC_HW_MIN_FRS; 3491 3492 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3493 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3494 /* Return '1' on OTx2 */ 3495 rsp->rpm_dwrr_mtu = 1; 3496 rsp->sdp_dwrr_mtu = 1; 3497 rsp->lbk_dwrr_mtu = 1; 3498 return 0; 3499 } 3500 3501 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3502 dwrr_mtu = rvu_read64(rvu, blkaddr, 3503 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3504 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3505 3506 dwrr_mtu = rvu_read64(rvu, blkaddr, 3507 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3508 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3509 3510 dwrr_mtu = rvu_read64(rvu, blkaddr, 3511 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3512 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3513 3514 return 0; 3515 } 3516 3517 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3518 struct msg_rsp *rsp) 3519 { 3520 u16 pcifunc = req->hdr.pcifunc; 3521 int i, nixlf, blkaddr, err; 3522 u64 stats; 3523 3524 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3525 if (err) 3526 return err; 3527 3528 /* Get stats count supported by HW */ 3529 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3530 3531 /* Reset tx stats */ 3532 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3533 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3534 3535 /* Reset rx stats */ 3536 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3537 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3538 3539 return 0; 3540 } 3541 3542 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3543 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3544 { 3545 int i; 3546 3547 /* Scan over exiting algo entries to find a match */ 3548 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3549 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3550 return i; 3551 3552 return -ERANGE; 3553 } 3554 3555 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ 3556 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) 3557 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ 3558 #define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ 
NPC_LT_LC_IP_OPT)) & 0xf) 3559 3560 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3561 { 3562 int idx, nr_field, key_off, field_marker, keyoff_marker; 3563 int max_key_off, max_bit_pos, group_member; 3564 struct nix_rx_flowkey_alg *field; 3565 struct nix_rx_flowkey_alg tmp; 3566 u32 key_type, valid_key; 3567 u32 l3_l4_src_dst; 3568 int l4_key_offset = 0; 3569 3570 if (!alg) 3571 return -EINVAL; 3572 3573 #define FIELDS_PER_ALG 5 3574 #define MAX_KEY_OFF 40 3575 /* Clear all fields */ 3576 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3577 3578 /* Each of the 32 possible flow key algorithm definitions should 3579 * fall into above incremental config (except ALG0). Otherwise a 3580 * single NPC MCAM entry is not sufficient for supporting RSS. 3581 * 3582 * If a different definition or combination needed then NPC MCAM 3583 * has to be programmed to filter such pkts and it's action should 3584 * point to this definition to calculate flowtag or hash. 3585 * 3586 * The `for loop` goes over _all_ protocol field and the following 3587 * variables depicts the state machine forward progress logic. 3588 * 3589 * keyoff_marker - Enabled when hash byte length needs to be accounted 3590 * in field->key_offset update. 3591 * field_marker - Enabled when a new field needs to be selected. 3592 * group_member - Enabled when protocol is part of a group. 3593 */ 3594 3595 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3596 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3597 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3598 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3599 */ 3600 l3_l4_src_dst = flow_cfg; 3601 /* Reset these 4 bits, so that these won't be part of key */ 3602 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3603 3604 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3605 nr_field = 0; key_off = 0; field_marker = 1; 3606 field = &tmp; max_bit_pos = fls(flow_cfg); 3607 for (idx = 0; 3608 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3609 key_off < MAX_KEY_OFF; idx++) { 3610 key_type = BIT(idx); 3611 valid_key = flow_cfg & key_type; 3612 /* Found a field marker, reset the field values */ 3613 if (field_marker) 3614 memset(&tmp, 0, sizeof(tmp)); 3615 3616 field_marker = true; 3617 keyoff_marker = true; 3618 switch (key_type) { 3619 case NIX_FLOW_KEY_TYPE_PORT: 3620 field->sel_chan = true; 3621 /* This should be set to 1, when SEL_CHAN is set */ 3622 field->bytesm1 = 1; 3623 break; 3624 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3625 field->lid = NPC_LID_LC; 3626 field->hdr_offset = 9; /* offset */ 3627 field->bytesm1 = 0; /* 1 byte */ 3628 field->ltype_match = NPC_LT_LC_IP; 3629 field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; 3630 break; 3631 case NIX_FLOW_KEY_TYPE_IPV4: 3632 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3633 field->lid = NPC_LID_LC; 3634 field->ltype_match = NPC_LT_LC_IP; 3635 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3636 field->lid = NPC_LID_LG; 3637 field->ltype_match = NPC_LT_LG_TU_IP; 3638 } 3639 field->hdr_offset = 12; /* SIP offset */ 3640 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3641 3642 /* Only SIP */ 3643 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3644 field->bytesm1 = 3; /* SIP, 4 bytes */ 3645 3646 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3647 /* Both SIP + DIP */ 3648 if (field->bytesm1 == 3) { 3649 field->bytesm1 = 7; /* SIP + DIP, 8B */ 3650 } else { 3651 /* Only DIP */ 3652 field->hdr_offset = 16; /* DIP off */ 3653 field->bytesm1 = 3; /* DIP, 4 bytes */ 3654 } 3655 } 3656 field->ltype_mask = 
NPC_LT_LC_IP_MATCH_MSK; 3657 keyoff_marker = false; 3658 break; 3659 case NIX_FLOW_KEY_TYPE_IPV6: 3660 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3661 field->lid = NPC_LID_LC; 3662 field->ltype_match = NPC_LT_LC_IP6; 3663 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3664 field->lid = NPC_LID_LG; 3665 field->ltype_match = NPC_LT_LG_TU_IP6; 3666 } 3667 field->hdr_offset = 8; /* SIP offset */ 3668 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3669 3670 /* Only SIP */ 3671 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3672 field->bytesm1 = 15; /* SIP, 16 bytes */ 3673 3674 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3675 /* Both SIP + DIP */ 3676 if (field->bytesm1 == 15) { 3677 /* SIP + DIP, 32 bytes */ 3678 field->bytesm1 = 31; 3679 } else { 3680 /* Only DIP */ 3681 field->hdr_offset = 24; /* DIP off */ 3682 field->bytesm1 = 15; /* DIP,16 bytes */ 3683 } 3684 } 3685 field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; 3686 break; 3687 case NIX_FLOW_KEY_TYPE_TCP: 3688 case NIX_FLOW_KEY_TYPE_UDP: 3689 case NIX_FLOW_KEY_TYPE_SCTP: 3690 case NIX_FLOW_KEY_TYPE_INNR_TCP: 3691 case NIX_FLOW_KEY_TYPE_INNR_UDP: 3692 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 3693 field->lid = NPC_LID_LD; 3694 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 3695 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 3696 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 3697 field->lid = NPC_LID_LH; 3698 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 3699 3700 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 3701 field->bytesm1 = 1; /* SRC, 2 bytes */ 3702 3703 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 3704 /* Both SRC + DST */ 3705 if (field->bytesm1 == 1) { 3706 /* SRC + DST, 4 bytes */ 3707 field->bytesm1 = 3; 3708 } else { 3709 /* Only DIP */ 3710 field->hdr_offset = 2; /* DST off */ 3711 field->bytesm1 = 1; /* DST, 2 bytes */ 3712 } 3713 } 3714 3715 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 3716 * so no need to change the ltype_match, just change 3717 * the lid for inner protocols 3718 */ 3719 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 3720 (int)NPC_LT_LH_TU_TCP); 3721 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 3722 (int)NPC_LT_LH_TU_UDP); 3723 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 3724 (int)NPC_LT_LH_TU_SCTP); 3725 3726 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 3727 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 3728 valid_key) { 3729 field->ltype_match |= NPC_LT_LD_TCP; 3730 group_member = true; 3731 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 3732 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 3733 valid_key) { 3734 field->ltype_match |= NPC_LT_LD_UDP; 3735 group_member = true; 3736 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 3737 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 3738 valid_key) { 3739 field->ltype_match |= NPC_LT_LD_SCTP; 3740 group_member = true; 3741 } 3742 field->ltype_mask = ~field->ltype_match; 3743 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 3744 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 3745 /* Handle the case where any of the group item 3746 * is enabled in the group but not the final one 3747 */ 3748 if (group_member) { 3749 valid_key = true; 3750 group_member = false; 3751 } 3752 } else { 3753 field_marker = false; 3754 keyoff_marker = false; 3755 } 3756 3757 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 3758 * remember the TCP key offset of 40 byte hash key. 
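 * The saved offset is reused below when ESP/AH fields are placed into
 * the hash key.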
3759 */ 3760 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 3761 l4_key_offset = key_off; 3762 break; 3763 case NIX_FLOW_KEY_TYPE_NVGRE: 3764 field->lid = NPC_LID_LD; 3765 field->hdr_offset = 4; /* VSID offset */ 3766 field->bytesm1 = 2; 3767 field->ltype_match = NPC_LT_LD_NVGRE; 3768 field->ltype_mask = 0xF; 3769 break; 3770 case NIX_FLOW_KEY_TYPE_VXLAN: 3771 case NIX_FLOW_KEY_TYPE_GENEVE: 3772 field->lid = NPC_LID_LE; 3773 field->bytesm1 = 2; 3774 field->hdr_offset = 4; 3775 field->ltype_mask = 0xF; 3776 field_marker = false; 3777 keyoff_marker = false; 3778 3779 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 3780 field->ltype_match |= NPC_LT_LE_VXLAN; 3781 group_member = true; 3782 } 3783 3784 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 3785 field->ltype_match |= NPC_LT_LE_GENEVE; 3786 group_member = true; 3787 } 3788 3789 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 3790 if (group_member) { 3791 field->ltype_mask = ~field->ltype_match; 3792 field_marker = true; 3793 keyoff_marker = true; 3794 valid_key = true; 3795 group_member = false; 3796 } 3797 } 3798 break; 3799 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 3800 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 3801 field->lid = NPC_LID_LA; 3802 field->ltype_match = NPC_LT_LA_ETHER; 3803 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 3804 field->lid = NPC_LID_LF; 3805 field->ltype_match = NPC_LT_LF_TU_ETHER; 3806 } 3807 field->hdr_offset = 0; 3808 field->bytesm1 = 5; /* DMAC 6 Byte */ 3809 field->ltype_mask = 0xF; 3810 break; 3811 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 3812 field->lid = NPC_LID_LC; 3813 field->hdr_offset = 40; /* IPV6 hdr */ 3814 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 3815 field->ltype_match = NPC_LT_LC_IP6_EXT; 3816 field->ltype_mask = 0xF; 3817 break; 3818 case NIX_FLOW_KEY_TYPE_GTPU: 3819 field->lid = NPC_LID_LE; 3820 field->hdr_offset = 4; 3821 field->bytesm1 = 3; /* 4 bytes TID*/ 3822 field->ltype_match = NPC_LT_LE_GTPU; 3823 field->ltype_mask = 0xF; 3824 break; 3825 case NIX_FLOW_KEY_TYPE_VLAN: 3826 field->lid = NPC_LID_LB; 3827 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 3828 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3829 field->ltype_match = NPC_LT_LB_CTAG; 3830 field->ltype_mask = 0xF; 3831 field->fn_mask = 1; /* Mask out the first nibble */ 3832 break; 3833 case NIX_FLOW_KEY_TYPE_AH: 3834 case NIX_FLOW_KEY_TYPE_ESP: 3835 field->hdr_offset = 0; 3836 field->bytesm1 = 7; /* SPI + sequence number */ 3837 field->ltype_mask = 0xF; 3838 field->lid = NPC_LID_LE; 3839 field->ltype_match = NPC_LT_LE_ESP; 3840 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3841 field->lid = NPC_LID_LD; 3842 field->ltype_match = NPC_LT_LD_AH; 3843 field->hdr_offset = 4; 3844 keyoff_marker = false; 3845 } 3846 break; 3847 } 3848 field->ena = 1; 3849 3850 /* Found a valid flow key type */ 3851 if (valid_key) { 3852 /* Use the key offset of TCP/UDP/SCTP fields 3853 * for ESP/AH fields. 
             */
            if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
                key_type == NIX_FLOW_KEY_TYPE_AH)
                key_off = l4_key_offset;
            field->key_offset = key_off;
            memcpy(&alg[nr_field], field, sizeof(*field));
            max_key_off = max(max_key_off, field->bytesm1 + 1);

            /* Found a field marker, get the next field */
            if (field_marker)
                nr_field++;
        }

        /* Found a keyoff marker, update the new key_off */
        if (keyoff_marker) {
            key_off += max_key_off;
            max_key_off = 0;
        }
    }
    /* Processed all the flow key types */
    if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
        return 0;
    else
        return NIX_AF_ERR_RSS_NOSPC_FIELD;
}

static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
    u64 field[FIELDS_PER_ALG];
    struct nix_hw *hw;
    int fid, rc;

    hw = get_nix_hw(rvu->hw, blkaddr);
    if (!hw)
        return NIX_AF_ERR_INVALID_NIXBLK;

    /* No room to add a new flow hash algorithm */
    if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
        return NIX_AF_ERR_RSS_NOSPC_ALGO;

    /* Generate algo fields for the given flow_cfg */
    rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
    if (rc)
        return rc;

    /* Update ALGX_FIELDX register with generated fields */
    for (fid = 0; fid < FIELDS_PER_ALG; fid++)
        rvu_write64(rvu, blkaddr,
                    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
                                                   fid), field[fid]);

    /* Store the flow_cfg for further lookup */
    rc = hw->flowkey.in_use;
    hw->flowkey.flowkey[rc] = flow_cfg;
    hw->flowkey.in_use++;

    return rc;
}

int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
                                         struct nix_rss_flowkey_cfg *req,
                                         struct nix_rss_flowkey_cfg_rsp *rsp)
{
    u16 pcifunc = req->hdr.pcifunc;
    int alg_idx, nixlf, blkaddr;
    struct nix_hw *nix_hw;
    int err;

    err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
    if (err)
        return err;

    nix_hw = get_nix_hw(rvu->hw, blkaddr);
    if (!nix_hw)
        return NIX_AF_ERR_INVALID_NIXBLK;

    alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
    /* Failed to get algo index from the existing list, reserve a new one */
    if (alg_idx < 0) {
        alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
                                          req->flowkey_cfg);
        if (alg_idx < 0)
            return alg_idx;
    }
    rsp->alg_idx = alg_idx;
    rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
                                   alg_idx, req->mcam_index);
    return 0;
}

static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
    u32 flowkey_cfg, minkey_cfg;
    int alg, fid, rc;

    /* Disable all flow key algx fieldx */
    for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
        for (fid = 0; fid < FIELDS_PER_ALG; fid++)
            rvu_write64(rvu, blkaddr,
                        NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
                        0);
    }

    /* IPv4/IPv6 SIP/DIPs */
    flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
    minkey_cfg = flowkey_cfg;
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
    rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
    if (rc < 0)
        return rc;

    /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
    flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
    rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3973 if (rc < 0) 3974 return rc; 3975 3976 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3977 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3978 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3979 if (rc < 0) 3980 return rc; 3981 3982 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3983 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3984 NIX_FLOW_KEY_TYPE_UDP; 3985 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3986 if (rc < 0) 3987 return rc; 3988 3989 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3990 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3991 NIX_FLOW_KEY_TYPE_SCTP; 3992 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3993 if (rc < 0) 3994 return rc; 3995 3996 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3997 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3998 NIX_FLOW_KEY_TYPE_SCTP; 3999 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4000 if (rc < 0) 4001 return rc; 4002 4003 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 4004 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 4005 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 4006 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 4007 if (rc < 0) 4008 return rc; 4009 4010 return 0; 4011 } 4012 4013 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 4014 struct nix_set_mac_addr *req, 4015 struct msg_rsp *rsp) 4016 { 4017 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 4018 u16 pcifunc = req->hdr.pcifunc; 4019 int blkaddr, nixlf, err; 4020 struct rvu_pfvf *pfvf; 4021 4022 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 4023 if (err) 4024 return err; 4025 4026 pfvf = rvu_get_pfvf(rvu, pcifunc); 4027 4028 /* untrusted VF can't overwrite admin(PF) changes */ 4029 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4030 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 4031 dev_warn(rvu->dev, 4032 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 4033 return -EPERM; 4034 } 4035 4036 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 4037 4038 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4039 pfvf->rx_chan_base, req->mac_addr); 4040 4041 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4042 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4043 4044 rvu_switch_update_rules(rvu, pcifunc); 4045 4046 return 0; 4047 } 4048 4049 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4050 struct msg_req *req, 4051 struct nix_get_mac_addr_rsp *rsp) 4052 { 4053 u16 pcifunc = req->hdr.pcifunc; 4054 struct rvu_pfvf *pfvf; 4055 4056 if (!is_nixlf_attached(rvu, pcifunc)) 4057 return NIX_AF_ERR_AF_LF_INVALID; 4058 4059 pfvf = rvu_get_pfvf(rvu, pcifunc); 4060 4061 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4062 4063 return 0; 4064 } 4065 4066 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4067 struct msg_rsp *rsp) 4068 { 4069 bool allmulti, promisc, nix_rx_multicast; 4070 u16 pcifunc = req->hdr.pcifunc; 4071 struct rvu_pfvf *pfvf; 4072 int nixlf, err; 4073 4074 pfvf = rvu_get_pfvf(rvu, pcifunc); 4075 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4076 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4077 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4078 4079 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4080 4081 if (is_vf(pcifunc) && !nix_rx_multicast && 4082 (promisc || allmulti)) { 4083 dev_warn_ratelimited(rvu->dev, 4084 "VF promisc/multicast not supported\n"); 4085 return 0; 4086 } 4087 4088 /* untrusted VF can't configure promisc/allmulti */ 4089 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4090 (promisc || allmulti)) 4091 return 0; 4092 4093 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4094 if (err) 4095 return err; 4096 4097 if (nix_rx_multicast) { 4098 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4099 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4100 allmulti); 4101 if (err) { 4102 dev_err(rvu->dev, 4103 "Failed to update pcifunc 0x%x to multicast list\n", 4104 pcifunc); 4105 return err; 4106 } 4107 4108 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4109 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4110 promisc); 4111 if (err) { 4112 dev_err(rvu->dev, 4113 "Failed to update pcifunc 0x%x to promisc list\n", 4114 pcifunc); 4115 return err; 4116 } 4117 } 4118 4119 /* install/uninstall allmulti entry */ 4120 if (allmulti) { 4121 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4122 pfvf->rx_chan_base); 4123 } else { 4124 if (!nix_rx_multicast) 4125 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4126 } 4127 4128 /* install/uninstall promisc entry */ 4129 if (promisc) 4130 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4131 pfvf->rx_chan_base, 4132 pfvf->rx_chan_cnt); 4133 else 4134 if (!nix_rx_multicast) 4135 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4136 4137 return 0; 4138 } 4139 4140 static void nix_find_link_frs(struct rvu *rvu, 4141 struct nix_frs_cfg *req, u16 pcifunc) 4142 { 4143 int pf = rvu_get_pf(pcifunc); 4144 struct rvu_pfvf *pfvf; 4145 int maxlen, minlen; 4146 int numvfs, hwvf; 4147 int vf; 4148 4149 /* Update with requester's min/max lengths */ 4150 pfvf = rvu_get_pfvf(rvu, pcifunc); 4151 pfvf->maxlen = req->maxlen; 4152 if (req->update_minlen) 4153 pfvf->minlen = req->minlen; 4154 4155 maxlen = req->maxlen; 4156 minlen = req->update_minlen ? 
req->minlen : 0; 4157 4158 /* Get this PF's numVFs and starting hwvf */ 4159 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4160 4161 /* For each VF, compare requested max/minlen */ 4162 for (vf = 0; vf < numvfs; vf++) { 4163 pfvf = &rvu->hwvf[hwvf + vf]; 4164 if (pfvf->maxlen > maxlen) 4165 maxlen = pfvf->maxlen; 4166 if (req->update_minlen && 4167 pfvf->minlen && pfvf->minlen < minlen) 4168 minlen = pfvf->minlen; 4169 } 4170 4171 /* Compare requested max/minlen with PF's max/minlen */ 4172 pfvf = &rvu->pf[pf]; 4173 if (pfvf->maxlen > maxlen) 4174 maxlen = pfvf->maxlen; 4175 if (req->update_minlen && 4176 pfvf->minlen && pfvf->minlen < minlen) 4177 minlen = pfvf->minlen; 4178 4179 /* Update the request with max/min PF's and it's VF's max/min */ 4180 req->maxlen = maxlen; 4181 if (req->update_minlen) 4182 req->minlen = minlen; 4183 } 4184 4185 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4186 struct msg_rsp *rsp) 4187 { 4188 struct rvu_hwinfo *hw = rvu->hw; 4189 u16 pcifunc = req->hdr.pcifunc; 4190 int pf = rvu_get_pf(pcifunc); 4191 int blkaddr, link = -1; 4192 struct nix_hw *nix_hw; 4193 struct rvu_pfvf *pfvf; 4194 u8 cgx = 0, lmac = 0; 4195 u16 max_mtu; 4196 u64 cfg; 4197 4198 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4199 if (blkaddr < 0) 4200 return NIX_AF_ERR_AF_LF_INVALID; 4201 4202 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4203 if (!nix_hw) 4204 return NIX_AF_ERR_INVALID_NIXBLK; 4205 4206 if (is_afvf(pcifunc)) 4207 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4208 else 4209 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4210 4211 if (!req->sdp_link && req->maxlen > max_mtu) 4212 return NIX_AF_ERR_FRS_INVALID; 4213 4214 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4215 return NIX_AF_ERR_FRS_INVALID; 4216 4217 /* Check if config is for SDP link */ 4218 if (req->sdp_link) { 4219 if (!hw->sdp_links) 4220 return NIX_AF_ERR_RX_LINK_INVALID; 4221 link = hw->cgx_links + hw->lbk_links; 4222 goto linkcfg; 4223 } 4224 4225 /* Check if the request is from CGX mapped RVU PF */ 4226 if (is_pf_cgxmapped(rvu, pf)) { 4227 /* Get CGX and LMAC to which this PF is mapped and find link */ 4228 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4229 link = (cgx * hw->lmac_per_cgx) + lmac; 4230 } else if (pf == 0) { 4231 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4232 pfvf = rvu_get_pfvf(rvu, pcifunc); 4233 link = hw->cgx_links + pfvf->lbkid; 4234 } 4235 4236 if (link < 0) 4237 return NIX_AF_ERR_RX_LINK_INVALID; 4238 4239 linkcfg: 4240 nix_find_link_frs(rvu, req, pcifunc); 4241 4242 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4243 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4244 if (req->update_minlen) 4245 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4246 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4247 4248 return 0; 4249 } 4250 4251 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4252 struct msg_rsp *rsp) 4253 { 4254 int nixlf, blkaddr, err; 4255 u64 cfg; 4256 4257 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4258 if (err) 4259 return err; 4260 4261 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4262 /* Set the interface configuration */ 4263 if (req->len_verify & BIT(0)) 4264 cfg |= BIT_ULL(41); 4265 else 4266 cfg &= ~BIT_ULL(41); 4267 4268 if (req->len_verify & BIT(1)) 4269 cfg |= BIT_ULL(40); 4270 else 4271 cfg &= ~BIT_ULL(40); 4272 4273 if (req->len_verify & NIX_RX_DROP_RE) 4274 cfg |= BIT_ULL(32); 4275 else 4276 cfg &= ~BIT_ULL(32); 
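    /* csum_verify bit 0 toggles HW L3/L4 checksum verification (LF RX_CFG bit 37) */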
4277 4278 if (req->csum_verify & BIT(0)) 4279 cfg |= BIT_ULL(37); 4280 else 4281 cfg &= ~BIT_ULL(37); 4282 4283 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4284 4285 return 0; 4286 } 4287 4288 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 4289 { 4290 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4291 } 4292 4293 static void nix_link_config(struct rvu *rvu, int blkaddr, 4294 struct nix_hw *nix_hw) 4295 { 4296 struct rvu_hwinfo *hw = rvu->hw; 4297 int cgx, lmac_cnt, slink, link; 4298 u16 lbk_max_frs, lmac_max_frs; 4299 unsigned long lmac_bmap; 4300 u64 tx_credits, cfg; 4301 u64 lmac_fifo_len; 4302 int iter; 4303 4304 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4305 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4306 4307 /* Set default min/max packet lengths allowed on NIX Rx links. 4308 * 4309 * With HW reset minlen value of 60byte, HW will treat ARP pkts 4310 * as undersize and report them to SW as error pkts, hence 4311 * setting it to 40 bytes. 4312 */ 4313 for (link = 0; link < hw->cgx_links; link++) { 4314 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4315 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4316 } 4317 4318 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4319 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4320 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4321 } 4322 if (hw->sdp_links) { 4323 link = hw->cgx_links + hw->lbk_links; 4324 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4325 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 4326 } 4327 4328 /* Get MCS external bypass status for CN10K-B */ 4329 if (mcs_get_blkcnt() == 1) { 4330 /* Adjust for 2 credits when external bypass is disabled */ 4331 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; 4332 } 4333 4334 /* Set credits for Tx links assuming max packet length allowed. 4335 * This will be reconfigured based on MTU set for PF/VF. 
4336 */ 4337 for (cgx = 0; cgx < hw->cgx; cgx++) { 4338 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4339 /* Skip when cgx is not available or lmac cnt is zero */ 4340 if (lmac_cnt <= 0) 4341 continue; 4342 slink = cgx * hw->lmac_per_cgx; 4343 4344 /* Get LMAC id's from bitmap */ 4345 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4346 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4347 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4348 if (!lmac_fifo_len) { 4349 dev_err(rvu->dev, 4350 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4351 __func__, cgx, iter); 4352 continue; 4353 } 4354 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4355 /* Enable credits and set credit pkt count to max allowed */ 4356 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4357 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4358 4359 link = iter + slink; 4360 nix_hw->tx_credits[link] = tx_credits; 4361 rvu_write64(rvu, blkaddr, 4362 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4363 } 4364 } 4365 4366 /* Set Tx credits for LBK link */ 4367 slink = hw->cgx_links; 4368 for (link = slink; link < (slink + hw->lbk_links); link++) { 4369 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4370 nix_hw->tx_credits[link] = tx_credits; 4371 /* Enable credits and set credit pkt count to max allowed */ 4372 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4373 rvu_write64(rvu, blkaddr, 4374 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4375 } 4376 } 4377 4378 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4379 { 4380 int idx, err; 4381 u64 status; 4382 4383 /* Start X2P bus calibration */ 4384 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4385 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4386 /* Wait for calibration to complete */ 4387 err = rvu_poll_reg(rvu, blkaddr, 4388 NIX_AF_STATUS, BIT_ULL(10), false); 4389 if (err) { 4390 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4391 return err; 4392 } 4393 4394 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4395 /* Check if CGX devices are ready */ 4396 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4397 /* Skip when cgx port is not available */ 4398 if (!rvu_cgx_pdata(idx, rvu) || 4399 (status & (BIT_ULL(16 + idx)))) 4400 continue; 4401 dev_err(rvu->dev, 4402 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4403 err = -EBUSY; 4404 } 4405 4406 /* Check if LBK is ready */ 4407 if (!(status & BIT_ULL(19))) { 4408 dev_err(rvu->dev, 4409 "LBK didn't respond to NIX X2P calibration\n"); 4410 err = -EBUSY; 4411 } 4412 4413 /* Clear 'calibrate_x2p' bit */ 4414 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4415 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4416 if (err || (status & 0x3FFULL)) 4417 dev_err(rvu->dev, 4418 "NIX X2P calibration failed, status 0x%llx\n", status); 4419 if (err) 4420 return err; 4421 return 0; 4422 } 4423 4424 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4425 { 4426 u64 cfg; 4427 int err; 4428 4429 /* Set admin queue endianness */ 4430 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4431 #ifdef __BIG_ENDIAN 4432 cfg |= BIT_ULL(8); 4433 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4434 #else 4435 cfg &= ~BIT_ULL(8); 4436 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4437 #endif 4438 4439 /* Do not bypass NDC cache */ 4440 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4441 cfg &= ~0x3FFEULL; 4442 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4443 /* Disable caching of SQB aka SQEs */ 4444 cfg |= 0x04ULL; 4445 #endif 4446 
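    /* Apply the NDC caching configuration decided above
     * (cache not bypassed; SQB caching optionally disabled).
     */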
    rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

    /* Result structure can be followed by RQ/SQ/CQ context at
     * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
     * operation type. Alloc sufficient result memory for all operations.
     */
    err = rvu_aq_alloc(rvu, &block->aq,
                       Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
                       ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
    if (err)
        return err;

    rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
    rvu_write64(rvu, block->addr,
                NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
    return 0;
}

static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
    struct rvu_hwinfo *hw = rvu->hw;
    u64 hw_const;

    hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

    /* On OcteonTx2, the DWRR quantum is directly configured into each of
     * the transmit scheduler queues, and PF/VF drivers are free to
     * configure any value up to 2^24.
     * On CN10K, HW is modified such that the quantum configuration at the
     * scheduler queues is in terms of weight, and SW needs to set up a base
     * DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
     * 'DWRR MTU * weight' to get the quantum.
     *
     * Check if HW uses a common MTU for all DWRR quantum configs.
     * On OcteonTx2 this register field is '0'.
     */
    if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
        hw->cap.nix_common_dwrr_mtu = true;

    if (hw_const & BIT_ULL(61))
        hw->cap.nix_multiple_dwrr_mtu = true;
}

static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
    const struct npc_lt_def_cfg *ltdefs;
    struct rvu_hwinfo *hw = rvu->hw;
    int blkaddr = nix_hw->blkaddr;
    struct rvu_block *block;
    int err;
    u64 cfg;

    block = &hw->block[blkaddr];

    if (is_rvu_96xx_B0(rvu)) {
        /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
         * internal state when conditional clocks are turned off.
         * Hence enable them.
         */
        rvu_write64(rvu, blkaddr, NIX_AF_CFG,
                    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
    }

    /* Set chan/link to backpressure TL3 instead of TL2 */
    rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

    /* Disable SQ manager's sticky mode operation (set TM6 = 0).
     * This sticky mode is known to cause SQ stalls when multiple
     * SQs are mapped to the same SMQ and are transmitting pkts at a time.
4515 */ 4516 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4517 cfg &= ~BIT_ULL(15); 4518 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4519 4520 ltdefs = rvu->kpu.lt_def; 4521 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4522 err = nix_calibrate_x2p(rvu, blkaddr); 4523 if (err) 4524 return err; 4525 4526 /* Setup capabilities of the NIX block */ 4527 rvu_nix_setup_capabilities(rvu, blkaddr); 4528 4529 /* Initialize admin queue */ 4530 err = nix_aq_init(rvu, block); 4531 if (err) 4532 return err; 4533 4534 /* Restore CINT timer delay to HW reset values */ 4535 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4536 4537 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4538 4539 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ 4540 cfg |= 1ULL; 4541 if (!is_rvu_otx2(rvu)) 4542 cfg |= NIX_PTP_1STEP_EN; 4543 4544 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4545 4546 if (!is_rvu_otx2(rvu)) 4547 rvu_nix_block_cn10k_init(rvu, nix_hw); 4548 4549 if (is_block_implemented(hw, blkaddr)) { 4550 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4551 if (err) 4552 return err; 4553 4554 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4555 if (err) 4556 return err; 4557 4558 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4559 if (err) 4560 return err; 4561 4562 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4563 if (err) 4564 return err; 4565 4566 err = nix_setup_txvlan(rvu, nix_hw); 4567 if (err) 4568 return err; 4569 4570 /* Configure segmentation offload formats */ 4571 nix_setup_lso(rvu, nix_hw, blkaddr); 4572 4573 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4574 * This helps HW protocol checker to identify headers 4575 * and validate length and checksums. 4576 */ 4577 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4578 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4579 ltdefs->rx_ol2.ltype_mask); 4580 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4581 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4582 ltdefs->rx_oip4.ltype_mask); 4583 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4584 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4585 ltdefs->rx_iip4.ltype_mask); 4586 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4587 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4588 ltdefs->rx_oip6.ltype_mask); 4589 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4590 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4591 ltdefs->rx_iip6.ltype_mask); 4592 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4593 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4594 ltdefs->rx_otcp.ltype_mask); 4595 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4596 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4597 ltdefs->rx_itcp.ltype_mask); 4598 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4599 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4600 ltdefs->rx_oudp.ltype_mask); 4601 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4602 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4603 ltdefs->rx_iudp.ltype_mask); 4604 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4605 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4606 ltdefs->rx_osctp.ltype_mask); 4607 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4608 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4609 ltdefs->rx_isctp.ltype_mask); 4610 4611 if (!is_rvu_otx2(rvu)) { 4612 /* Enable APAD 
calculation for other protocols 4613 * matching APAD0 and APAD1 lt def registers. 4614 */ 4615 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 4616 (ltdefs->rx_apad0.valid << 11) | 4617 (ltdefs->rx_apad0.lid << 8) | 4618 (ltdefs->rx_apad0.ltype_match << 4) | 4619 ltdefs->rx_apad0.ltype_mask); 4620 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 4621 (ltdefs->rx_apad1.valid << 11) | 4622 (ltdefs->rx_apad1.lid << 8) | 4623 (ltdefs->rx_apad1.ltype_match << 4) | 4624 ltdefs->rx_apad1.ltype_mask); 4625 4626 /* Receive ethertype defination register defines layer 4627 * information in NPC_RESULT_S to identify the Ethertype 4628 * location in L2 header. Used for Ethertype overwriting 4629 * in inline IPsec flow. 4630 */ 4631 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 4632 (ltdefs->rx_et[0].offset << 12) | 4633 (ltdefs->rx_et[0].valid << 11) | 4634 (ltdefs->rx_et[0].lid << 8) | 4635 (ltdefs->rx_et[0].ltype_match << 4) | 4636 ltdefs->rx_et[0].ltype_mask); 4637 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 4638 (ltdefs->rx_et[1].offset << 12) | 4639 (ltdefs->rx_et[1].valid << 11) | 4640 (ltdefs->rx_et[1].lid << 8) | 4641 (ltdefs->rx_et[1].ltype_match << 4) | 4642 ltdefs->rx_et[1].ltype_mask); 4643 } 4644 4645 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 4646 if (err) 4647 return err; 4648 4649 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 4650 sizeof(u64), GFP_KERNEL); 4651 if (!nix_hw->tx_credits) 4652 return -ENOMEM; 4653 4654 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 4655 nix_link_config(rvu, blkaddr, nix_hw); 4656 4657 /* Enable Channel backpressure */ 4658 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 4659 } 4660 return 0; 4661 } 4662 4663 int rvu_nix_init(struct rvu *rvu) 4664 { 4665 struct rvu_hwinfo *hw = rvu->hw; 4666 struct nix_hw *nix_hw; 4667 int blkaddr = 0, err; 4668 int i = 0; 4669 4670 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 4671 GFP_KERNEL); 4672 if (!hw->nix) 4673 return -ENOMEM; 4674 4675 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4676 while (blkaddr) { 4677 nix_hw = &hw->nix[i]; 4678 nix_hw->rvu = rvu; 4679 nix_hw->blkaddr = blkaddr; 4680 err = rvu_nix_block_init(rvu, nix_hw); 4681 if (err) 4682 return err; 4683 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4684 i++; 4685 } 4686 4687 return 0; 4688 } 4689 4690 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 4691 struct rvu_block *block) 4692 { 4693 struct nix_txsch *txsch; 4694 struct nix_mcast *mcast; 4695 struct nix_txvlan *vlan; 4696 struct nix_hw *nix_hw; 4697 int lvl; 4698 4699 rvu_aq_free(rvu, block->aq); 4700 4701 if (is_block_implemented(rvu->hw, blkaddr)) { 4702 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4703 if (!nix_hw) 4704 return; 4705 4706 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 4707 txsch = &nix_hw->txsch[lvl]; 4708 kfree(txsch->schq.bmap); 4709 } 4710 4711 kfree(nix_hw->tx_credits); 4712 4713 nix_ipolicer_freemem(rvu, nix_hw); 4714 4715 vlan = &nix_hw->txvlan; 4716 kfree(vlan->rsrc.bmap); 4717 mutex_destroy(&vlan->rsrc_lock); 4718 4719 mcast = &nix_hw->mcast; 4720 qmem_free(rvu->dev, mcast->mce_ctx); 4721 qmem_free(rvu->dev, mcast->mcast_buf); 4722 mutex_destroy(&mcast->mce_lock); 4723 } 4724 } 4725 4726 void rvu_nix_freemem(struct rvu *rvu) 4727 { 4728 struct rvu_hwinfo *hw = rvu->hw; 4729 struct rvu_block *block; 4730 int blkaddr = 0; 4731 4732 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4733 while (blkaddr) { 4734 block = &hw->block[blkaddr]; 4735 rvu_nix_block_freemem(rvu, blkaddr, block); 
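        /* Walk to the next NIX block, if this silicon implements more than one */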
4736 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4737 } 4738 } 4739 4740 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 4741 struct msg_rsp *rsp) 4742 { 4743 u16 pcifunc = req->hdr.pcifunc; 4744 struct rvu_pfvf *pfvf; 4745 int nixlf, err; 4746 4747 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4748 if (err) 4749 return err; 4750 4751 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 4752 4753 npc_mcam_enable_flows(rvu, pcifunc); 4754 4755 pfvf = rvu_get_pfvf(rvu, pcifunc); 4756 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 4757 4758 rvu_switch_update_rules(rvu, pcifunc); 4759 4760 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 4761 } 4762 4763 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 4764 struct msg_rsp *rsp) 4765 { 4766 u16 pcifunc = req->hdr.pcifunc; 4767 struct rvu_pfvf *pfvf; 4768 int nixlf, err; 4769 4770 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4771 if (err) 4772 return err; 4773 4774 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4775 4776 pfvf = rvu_get_pfvf(rvu, pcifunc); 4777 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4778 4779 err = rvu_cgx_start_stop_io(rvu, pcifunc, false); 4780 if (err) 4781 return err; 4782 4783 rvu_cgx_tx_enable(rvu, pcifunc, true); 4784 4785 return 0; 4786 } 4787 4788 #define RX_SA_BASE GENMASK_ULL(52, 7) 4789 4790 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 4791 { 4792 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 4793 struct hwctx_disable_req ctx_req; 4794 int pf = rvu_get_pf(pcifunc); 4795 struct mac_ops *mac_ops; 4796 u8 cgx_id, lmac_id; 4797 u64 sa_base; 4798 void *cgxd; 4799 int err; 4800 4801 ctx_req.hdr.pcifunc = pcifunc; 4802 4803 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 4804 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4805 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 4806 nix_interface_deinit(rvu, pcifunc, nixlf); 4807 nix_rx_sync(rvu, blkaddr); 4808 nix_txschq_free(rvu, pcifunc); 4809 4810 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4811 4812 rvu_cgx_start_stop_io(rvu, pcifunc, false); 4813 4814 if (pfvf->sq_ctx) { 4815 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 4816 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4817 if (err) 4818 dev_err(rvu->dev, "SQ ctx disable failed\n"); 4819 } 4820 4821 if (pfvf->rq_ctx) { 4822 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 4823 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4824 if (err) 4825 dev_err(rvu->dev, "RQ ctx disable failed\n"); 4826 } 4827 4828 if (pfvf->cq_ctx) { 4829 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 4830 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4831 if (err) 4832 dev_err(rvu->dev, "CQ ctx disable failed\n"); 4833 } 4834 4835 /* reset HW config done for Switch headers */ 4836 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 4837 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 4838 4839 /* Disabling CGX and NPC config done for PTP */ 4840 if (pfvf->hw_rx_tstamp_en) { 4841 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4842 cgxd = rvu_cgx_pdata(cgx_id, rvu); 4843 mac_ops = get_mac_ops(cgxd); 4844 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 4845 /* Undo NPC config done for PTP */ 4846 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 4847 dev_err(rvu->dev, "NPC config for PTP failed\n"); 4848 pfvf->hw_rx_tstamp_en = false; 4849 } 4850 4851 /* reset priority flow control config */ 4852 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 4853 4854 /* reset 802.3x flow control config */ 4855 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 
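    /* Free RQ/SQ/CQ HW context memory and any bandwidth profiles still held by this LF */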
4856 4857 nix_ctx_free(rvu, pfvf); 4858 4859 nix_free_all_bandprof(rvu, pcifunc); 4860 4861 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 4862 if (FIELD_GET(RX_SA_BASE, sa_base)) { 4863 err = rvu_cpt_ctx_flush(rvu, pcifunc); 4864 if (err) 4865 dev_err(rvu->dev, 4866 "CPT ctx flush failed with error: %d\n", err); 4867 } 4868 } 4869 4870 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 4871 4872 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 4873 { 4874 struct rvu_hwinfo *hw = rvu->hw; 4875 struct rvu_block *block; 4876 int blkaddr, pf; 4877 int nixlf; 4878 u64 cfg; 4879 4880 pf = rvu_get_pf(pcifunc); 4881 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 4882 return 0; 4883 4884 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4885 if (blkaddr < 0) 4886 return NIX_AF_ERR_AF_LF_INVALID; 4887 4888 block = &hw->block[blkaddr]; 4889 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 4890 if (nixlf < 0) 4891 return NIX_AF_ERR_AF_LF_INVALID; 4892 4893 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 4894 4895 if (enable) 4896 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 4897 else 4898 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 4899 4900 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 4901 4902 return 0; 4903 } 4904 4905 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 4906 struct msg_rsp *rsp) 4907 { 4908 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 4909 } 4910 4911 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 4912 struct msg_rsp *rsp) 4913 { 4914 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4915 } 4916 4917 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4918 struct nix_lso_format_cfg *req, 4919 struct nix_lso_format_cfg_rsp *rsp) 4920 { 4921 u16 pcifunc = req->hdr.pcifunc; 4922 struct nix_hw *nix_hw; 4923 struct rvu_pfvf *pfvf; 4924 int blkaddr, idx, f; 4925 u64 reg; 4926 4927 pfvf = rvu_get_pfvf(rvu, pcifunc); 4928 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4929 if (!pfvf->nixlf || blkaddr < 0) 4930 return NIX_AF_ERR_AF_LF_INVALID; 4931 4932 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4933 if (!nix_hw) 4934 return NIX_AF_ERR_INVALID_NIXBLK; 4935 4936 /* Find existing matching LSO format, if any */ 4937 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4938 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4939 reg = rvu_read64(rvu, blkaddr, 4940 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 4941 if (req->fields[f] != (reg & req->field_mask)) 4942 break; 4943 } 4944 4945 if (f == NIX_LSO_FIELD_MAX) 4946 break; 4947 } 4948 4949 if (idx < nix_hw->lso.in_use) { 4950 /* Match found */ 4951 rsp->lso_format_idx = idx; 4952 return 0; 4953 } 4954 4955 if (nix_hw->lso.in_use == nix_hw->lso.total) 4956 return NIX_AF_ERR_LSO_CFG_FAIL; 4957 4958 rsp->lso_format_idx = nix_hw->lso.in_use++; 4959 4960 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 4961 rvu_write64(rvu, blkaddr, 4962 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 4963 req->fields[f]); 4964 4965 return 0; 4966 } 4967 4968 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 4969 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 4970 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 4971 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 4972 4973 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 4974 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 4975 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 4976 4977 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 4978 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 4979 
#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 4980 4981 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 4982 int blkaddr) 4983 { 4984 u8 cpt_idx, cpt_blkaddr; 4985 u64 val; 4986 4987 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; 4988 if (req->enable) { 4989 val = 0; 4990 /* Enable context prefetching */ 4991 if (!is_rvu_otx2(rvu)) 4992 val |= BIT_ULL(51); 4993 4994 /* Set OPCODE and EGRP */ 4995 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 4996 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 4997 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 4998 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 4999 5000 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5001 5002 /* Set CPT queue for inline IPSec */ 5003 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5004 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5005 req->inst_qsel.cpt_pf_func); 5006 5007 if (!is_rvu_otx2(rvu)) { 5008 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : 5009 BLKADDR_CPT1; 5010 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5011 } 5012 5013 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5014 val); 5015 5016 /* Set CPT credit */ 5017 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5018 if ((val & 0x3FFFFF) != 0x3FFFFF) 5019 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5020 0x3FFFFF - val); 5021 5022 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5023 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5024 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5025 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5026 } else { 5027 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5028 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5029 0x0); 5030 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5031 if ((val & 0x3FFFFF) != 0x3FFFFF) 5032 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5033 0x3FFFFF - val); 5034 } 5035 } 5036 5037 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5038 struct nix_inline_ipsec_cfg *req, 5039 struct msg_rsp *rsp) 5040 { 5041 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5042 return 0; 5043 5044 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5045 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5046 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5047 5048 return 0; 5049 } 5050 5051 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5052 struct msg_req *req, 5053 struct nix_inline_ipsec_cfg *rsp) 5054 5055 { 5056 u64 val; 5057 5058 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5059 return 0; 5060 5061 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5062 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5063 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5064 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5065 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5066 5067 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5068 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5069 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5070 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5071 5072 return 0; 5073 } 5074 5075 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5076 struct nix_inline_ipsec_lf_cfg *req, 5077 struct msg_rsp *rsp) 5078 { 5079 int lf, blkaddr, err; 5080 u64 val; 5081 5082 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5083 return 0; 5084 
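    /* Resolve the NIX LF number and block address of the requesting PF/VF */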
5085 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5086 if (err) 5087 return err; 5088 5089 if (req->enable) { 5090 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5091 val = (u64)req->ipsec_cfg0.tt << 44 | 5092 (u64)req->ipsec_cfg0.tag_const << 20 | 5093 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5094 req->ipsec_cfg0.lenm1_max; 5095 5096 if (blkaddr == BLKADDR_NIX1) 5097 val |= BIT_ULL(46); 5098 5099 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5100 5101 /* Set SA_IDX_W and SA_IDX_MAX */ 5102 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5103 req->ipsec_cfg1.sa_idx_max; 5104 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5105 5106 /* Set SA base address */ 5107 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5108 req->sa_base_addr); 5109 } else { 5110 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5111 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5112 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5113 0x0); 5114 } 5115 5116 return 0; 5117 } 5118 5119 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5120 { 5121 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5122 5123 /* overwrite vf mac address with default_mac */ 5124 if (from_vf) 5125 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5126 } 5127 5128 /* NIX ingress policers or bandwidth profiles APIs */ 5129 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5130 { 5131 struct npc_lt_def_cfg defs, *ltdefs; 5132 5133 ltdefs = &defs; 5134 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5135 5136 /* Extract PCP and DEI fields from outer VLAN from byte offset 5137 * 2 from the start of LB_PTR (ie TAG). 5138 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5139 * fields are considered when 'Tunnel enable' is set in profile. 
5140 */ 5141 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5142 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5143 (ltdefs->ovlan.ltype_match << 4) | 5144 ltdefs->ovlan.ltype_mask); 5145 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5146 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5147 (ltdefs->ivlan.ltype_match << 4) | 5148 ltdefs->ivlan.ltype_mask); 5149 5150 /* DSCP field in outer and tunneled IPv4 packets */ 5151 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5152 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5153 (ltdefs->rx_oip4.ltype_match << 4) | 5154 ltdefs->rx_oip4.ltype_mask); 5155 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5156 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5157 (ltdefs->rx_iip4.ltype_match << 4) | 5158 ltdefs->rx_iip4.ltype_mask); 5159 5160 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5161 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5162 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5163 (ltdefs->rx_oip6.ltype_match << 4) | 5164 ltdefs->rx_oip6.ltype_mask); 5165 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5166 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5167 (ltdefs->rx_iip6.ltype_match << 4) | 5168 ltdefs->rx_iip6.ltype_mask); 5169 } 5170 5171 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5172 int layer, int prof_idx) 5173 { 5174 struct nix_cn10k_aq_enq_req aq_req; 5175 int rc; 5176 5177 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5178 5179 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5180 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5181 aq_req.op = NIX_AQ_INSTOP_INIT; 5182 5183 /* Context is all zeros, submit to AQ */ 5184 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5185 (struct nix_aq_enq_req *)&aq_req, NULL); 5186 if (rc) 5187 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5188 layer, prof_idx); 5189 return rc; 5190 } 5191 5192 static int nix_setup_ipolicers(struct rvu *rvu, 5193 struct nix_hw *nix_hw, int blkaddr) 5194 { 5195 struct rvu_hwinfo *hw = rvu->hw; 5196 struct nix_ipolicer *ipolicer; 5197 int err, layer, prof_idx; 5198 u64 cfg; 5199 5200 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5201 if (!(cfg & BIT_ULL(61))) { 5202 hw->cap.ipolicer = false; 5203 return 0; 5204 } 5205 5206 hw->cap.ipolicer = true; 5207 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5208 sizeof(*ipolicer), GFP_KERNEL); 5209 if (!nix_hw->ipolicer) 5210 return -ENOMEM; 5211 5212 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5213 5214 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5215 ipolicer = &nix_hw->ipolicer[layer]; 5216 switch (layer) { 5217 case BAND_PROF_LEAF_LAYER: 5218 ipolicer->band_prof.max = cfg & 0XFFFF; 5219 break; 5220 case BAND_PROF_MID_LAYER: 5221 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5222 break; 5223 case BAND_PROF_TOP_LAYER: 5224 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5225 break; 5226 } 5227 5228 if (!ipolicer->band_prof.max) 5229 continue; 5230 5231 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5232 if (err) 5233 return err; 5234 5235 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5236 ipolicer->band_prof.max, 5237 sizeof(u16), GFP_KERNEL); 5238 if (!ipolicer->pfvf_map) 5239 return -ENOMEM; 5240 5241 ipolicer->match_id = devm_kcalloc(rvu->dev, 5242 ipolicer->band_prof.max, 5243 sizeof(u16), GFP_KERNEL); 5244 if (!ipolicer->match_id) 5245 return -ENOMEM; 5246 5247 for (prof_idx = 0; 5248 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5249 /* Set AF as current owner for INIT ops to succeed */ 
5250 ipolicer->pfvf_map[prof_idx] = 0x00; 5251 5252 /* There is no enable bit in the profile context, 5253 * so no context disable. So let's INIT them here 5254 * so that PF/VF later on have to just do WRITE to 5255 * setup policer rates and config. 5256 */ 5257 err = nix_init_policer_context(rvu, nix_hw, 5258 layer, prof_idx); 5259 if (err) 5260 return err; 5261 } 5262 5263 /* Allocate memory for maintaining ref_counts for MID level 5264 * profiles, this will be needed for leaf layer profiles' 5265 * aggregation. 5266 */ 5267 if (layer != BAND_PROF_MID_LAYER) 5268 continue; 5269 5270 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5271 ipolicer->band_prof.max, 5272 sizeof(u16), GFP_KERNEL); 5273 if (!ipolicer->ref_count) 5274 return -ENOMEM; 5275 } 5276 5277 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5278 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5279 5280 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5281 5282 return 0; 5283 } 5284 5285 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5286 { 5287 struct nix_ipolicer *ipolicer; 5288 int layer; 5289 5290 if (!rvu->hw->cap.ipolicer) 5291 return; 5292 5293 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5294 ipolicer = &nix_hw->ipolicer[layer]; 5295 5296 if (!ipolicer->band_prof.max) 5297 continue; 5298 5299 kfree(ipolicer->band_prof.bmap); 5300 } 5301 } 5302 5303 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5304 struct nix_hw *nix_hw, u16 pcifunc) 5305 { 5306 struct nix_ipolicer *ipolicer; 5307 int layer, hi_layer, prof_idx; 5308 5309 /* Bits [15:14] in profile index represent layer */ 5310 layer = (req->qidx >> 14) & 0x03; 5311 prof_idx = req->qidx & 0x3FFF; 5312 5313 ipolicer = &nix_hw->ipolicer[layer]; 5314 if (prof_idx >= ipolicer->band_prof.max) 5315 return -EINVAL; 5316 5317 /* Check if the profile is allocated to the requesting PCIFUNC or not 5318 * with the exception of AF. AF is allowed to read and update contexts. 5319 */ 5320 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5321 return -EINVAL; 5322 5323 /* If this profile is linked to higher layer profile then check 5324 * if that profile is also allocated to the requesting PCIFUNC 5325 * or not. 5326 */ 5327 if (!req->prof.hl_en) 5328 return 0; 5329 5330 /* Leaf layer profile can link only to mid layer and 5331 * mid layer to top layer. 
5332 */ 5333 if (layer == BAND_PROF_LEAF_LAYER) 5334 hi_layer = BAND_PROF_MID_LAYER; 5335 else if (layer == BAND_PROF_MID_LAYER) 5336 hi_layer = BAND_PROF_TOP_LAYER; 5337 else 5338 return -EINVAL; 5339 5340 ipolicer = &nix_hw->ipolicer[hi_layer]; 5341 prof_idx = req->prof.band_prof_id; 5342 if (prof_idx >= ipolicer->band_prof.max || 5343 ipolicer->pfvf_map[prof_idx] != pcifunc) 5344 return -EINVAL; 5345 5346 return 0; 5347 } 5348 5349 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 5350 struct nix_bandprof_alloc_req *req, 5351 struct nix_bandprof_alloc_rsp *rsp) 5352 { 5353 int blkaddr, layer, prof, idx, err; 5354 u16 pcifunc = req->hdr.pcifunc; 5355 struct nix_ipolicer *ipolicer; 5356 struct nix_hw *nix_hw; 5357 5358 if (!rvu->hw->cap.ipolicer) 5359 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5360 5361 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5362 if (err) 5363 return err; 5364 5365 mutex_lock(&rvu->rsrc_lock); 5366 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5367 if (layer == BAND_PROF_INVAL_LAYER) 5368 continue; 5369 if (!req->prof_count[layer]) 5370 continue; 5371 5372 ipolicer = &nix_hw->ipolicer[layer]; 5373 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5374 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 5375 if (idx == MAX_BANDPROF_PER_PFFUNC) 5376 break; 5377 5378 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5379 if (prof < 0) 5380 break; 5381 rsp->prof_count[layer]++; 5382 rsp->prof_idx[layer][idx] = prof; 5383 ipolicer->pfvf_map[prof] = pcifunc; 5384 } 5385 } 5386 mutex_unlock(&rvu->rsrc_lock); 5387 return 0; 5388 } 5389 5390 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 5391 { 5392 int blkaddr, layer, prof_idx, err; 5393 struct nix_ipolicer *ipolicer; 5394 struct nix_hw *nix_hw; 5395 5396 if (!rvu->hw->cap.ipolicer) 5397 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5398 5399 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5400 if (err) 5401 return err; 5402 5403 mutex_lock(&rvu->rsrc_lock); 5404 /* Free all the profiles allocated to the PCIFUNC */ 5405 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5406 if (layer == BAND_PROF_INVAL_LAYER) 5407 continue; 5408 ipolicer = &nix_hw->ipolicer[layer]; 5409 5410 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 5411 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 5412 continue; 5413 5414 /* Clear ratelimit aggregation, if any */ 5415 if (layer == BAND_PROF_LEAF_LAYER && 5416 ipolicer->match_id[prof_idx]) 5417 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5418 5419 ipolicer->pfvf_map[prof_idx] = 0x00; 5420 ipolicer->match_id[prof_idx] = 0; 5421 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5422 } 5423 } 5424 mutex_unlock(&rvu->rsrc_lock); 5425 return 0; 5426 } 5427 5428 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 5429 struct nix_bandprof_free_req *req, 5430 struct msg_rsp *rsp) 5431 { 5432 int blkaddr, layer, prof_idx, idx, err; 5433 u16 pcifunc = req->hdr.pcifunc; 5434 struct nix_ipolicer *ipolicer; 5435 struct nix_hw *nix_hw; 5436 5437 if (req->free_all) 5438 return nix_free_all_bandprof(rvu, pcifunc); 5439 5440 if (!rvu->hw->cap.ipolicer) 5441 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5442 5443 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5444 if (err) 5445 return err; 5446 5447 mutex_lock(&rvu->rsrc_lock); 5448 /* Free the requested profile indices */ 5449 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5450 if (layer == BAND_PROF_INVAL_LAYER) 5451 continue; 5452 if (!req->prof_count[layer]) 5453 
continue; 5454 5455 ipolicer = &nix_hw->ipolicer[layer]; 5456 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5457 if (idx == MAX_BANDPROF_PER_PFFUNC) 5458 break; 5459 prof_idx = req->prof_idx[layer][idx]; 5460 if (prof_idx >= ipolicer->band_prof.max || 5461 ipolicer->pfvf_map[prof_idx] != pcifunc) 5462 continue; 5463 5464 /* Clear ratelimit aggregation, if any */ 5465 if (layer == BAND_PROF_LEAF_LAYER && 5466 ipolicer->match_id[prof_idx]) 5467 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5468 5469 ipolicer->pfvf_map[prof_idx] = 0x00; 5470 ipolicer->match_id[prof_idx] = 0; 5471 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5472 } 5473 } 5474 mutex_unlock(&rvu->rsrc_lock); 5475 return 0; 5476 } 5477 5478 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 5479 struct nix_cn10k_aq_enq_req *aq_req, 5480 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5481 u16 pcifunc, u8 ctype, u32 qidx) 5482 { 5483 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5484 aq_req->hdr.pcifunc = pcifunc; 5485 aq_req->ctype = ctype; 5486 aq_req->op = NIX_AQ_INSTOP_READ; 5487 aq_req->qidx = qidx; 5488 5489 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5490 (struct nix_aq_enq_req *)aq_req, 5491 (struct nix_aq_enq_rsp *)aq_rsp); 5492 } 5493 5494 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 5495 struct nix_hw *nix_hw, 5496 struct nix_cn10k_aq_enq_req *aq_req, 5497 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5498 u32 leaf_prof, u16 mid_prof) 5499 { 5500 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5501 aq_req->hdr.pcifunc = 0x00; 5502 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 5503 aq_req->op = NIX_AQ_INSTOP_WRITE; 5504 aq_req->qidx = leaf_prof; 5505 5506 aq_req->prof.band_prof_id = mid_prof; 5507 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 5508 aq_req->prof.hl_en = 1; 5509 aq_req->prof_mask.hl_en = 1; 5510 5511 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5512 (struct nix_aq_enq_req *)aq_req, 5513 (struct nix_aq_enq_rsp *)aq_rsp); 5514 } 5515 5516 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 5517 u16 rq_idx, u16 match_id) 5518 { 5519 int leaf_prof, mid_prof, leaf_match; 5520 struct nix_cn10k_aq_enq_req aq_req; 5521 struct nix_cn10k_aq_enq_rsp aq_rsp; 5522 struct nix_ipolicer *ipolicer; 5523 struct nix_hw *nix_hw; 5524 int blkaddr, idx, rc; 5525 5526 if (!rvu->hw->cap.ipolicer) 5527 return 0; 5528 5529 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5530 if (rc) 5531 return rc; 5532 5533 /* Fetch the RQ's context to see if policing is enabled */ 5534 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 5535 NIX_AQ_CTYPE_RQ, rq_idx); 5536 if (rc) { 5537 dev_err(rvu->dev, 5538 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 5539 __func__, rq_idx, pcifunc); 5540 return rc; 5541 } 5542 5543 if (!aq_rsp.rq.policer_ena) 5544 return 0; 5545 5546 /* Get the bandwidth profile ID mapped to this RQ */ 5547 leaf_prof = aq_rsp.rq.band_prof_id; 5548 5549 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 5550 ipolicer->match_id[leaf_prof] = match_id; 5551 5552 /* Check if any other leaf profile is marked with same match_id */ 5553 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 5554 if (idx == leaf_prof) 5555 continue; 5556 if (ipolicer->match_id[idx] != match_id) 5557 continue; 5558 5559 leaf_match = idx; 5560 break; 5561 } 5562 5563 if (idx == ipolicer->band_prof.max) 5564 return 0; 5565 5566 /* Fetch the matching profile's context to check if it's already 5567 * mapped to a mid level profile. 
5568 */ 5569 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5570 NIX_AQ_CTYPE_BANDPROF, leaf_match); 5571 if (rc) { 5572 dev_err(rvu->dev, 5573 "%s: Failed to fetch context of leaf profile %d\n", 5574 __func__, leaf_match); 5575 return rc; 5576 } 5577 5578 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5579 if (aq_rsp.prof.hl_en) { 5580 /* Get Mid layer prof index and map leaf_prof index 5581 * also such that flows that are being steered 5582 * to different RQs and marked with same match_id 5583 * are rate limited in a aggregate fashion 5584 */ 5585 mid_prof = aq_rsp.prof.band_prof_id; 5586 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5587 &aq_req, &aq_rsp, 5588 leaf_prof, mid_prof); 5589 if (rc) { 5590 dev_err(rvu->dev, 5591 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5592 __func__, leaf_prof, mid_prof); 5593 goto exit; 5594 } 5595 5596 mutex_lock(&rvu->rsrc_lock); 5597 ipolicer->ref_count[mid_prof]++; 5598 mutex_unlock(&rvu->rsrc_lock); 5599 goto exit; 5600 } 5601 5602 /* Allocate a mid layer profile and 5603 * map both 'leaf_prof' and 'leaf_match' profiles to it. 5604 */ 5605 mutex_lock(&rvu->rsrc_lock); 5606 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5607 if (mid_prof < 0) { 5608 dev_err(rvu->dev, 5609 "%s: Unable to allocate mid layer profile\n", __func__); 5610 mutex_unlock(&rvu->rsrc_lock); 5611 goto exit; 5612 } 5613 mutex_unlock(&rvu->rsrc_lock); 5614 ipolicer->pfvf_map[mid_prof] = 0x00; 5615 ipolicer->ref_count[mid_prof] = 0; 5616 5617 /* Initialize mid layer profile same as 'leaf_prof' */ 5618 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5619 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5620 if (rc) { 5621 dev_err(rvu->dev, 5622 "%s: Failed to fetch context of leaf profile %d\n", 5623 __func__, leaf_prof); 5624 goto exit; 5625 } 5626 5627 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5628 aq_req.hdr.pcifunc = 0x00; 5629 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 5630 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5631 aq_req.op = NIX_AQ_INSTOP_WRITE; 5632 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 5633 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); 5634 /* Clear higher layer enable bit in the mid profile, just in case */ 5635 aq_req.prof.hl_en = 0; 5636 aq_req.prof_mask.hl_en = 1; 5637 5638 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5639 (struct nix_aq_enq_req *)&aq_req, NULL); 5640 if (rc) { 5641 dev_err(rvu->dev, 5642 "%s: Failed to INIT context of mid layer profile %d\n", 5643 __func__, mid_prof); 5644 goto exit; 5645 } 5646 5647 /* Map both leaf profiles to this mid layer profile */ 5648 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5649 &aq_req, &aq_rsp, 5650 leaf_prof, mid_prof); 5651 if (rc) { 5652 dev_err(rvu->dev, 5653 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5654 __func__, leaf_prof, mid_prof); 5655 goto exit; 5656 } 5657 5658 mutex_lock(&rvu->rsrc_lock); 5659 ipolicer->ref_count[mid_prof]++; 5660 mutex_unlock(&rvu->rsrc_lock); 5661 5662 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5663 &aq_req, &aq_rsp, 5664 leaf_match, mid_prof); 5665 if (rc) { 5666 dev_err(rvu->dev, 5667 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5668 __func__, leaf_match, mid_prof); 5669 ipolicer->ref_count[mid_prof]--; 5670 goto exit; 5671 } 5672 5673 mutex_lock(&rvu->rsrc_lock); 5674 ipolicer->ref_count[mid_prof]++; 5675 mutex_unlock(&rvu->rsrc_lock); 5676 5677 exit: 5678 return rc; 5679 } 5680 5681 /* Called with mutex rsrc_lock */ 5682 static void 
nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 5683 u32 leaf_prof) 5684 { 5685 struct nix_cn10k_aq_enq_req aq_req; 5686 struct nix_cn10k_aq_enq_rsp aq_rsp; 5687 struct nix_ipolicer *ipolicer; 5688 u16 mid_prof; 5689 int rc; 5690 5691 mutex_unlock(&rvu->rsrc_lock); 5692 5693 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5694 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5695 5696 mutex_lock(&rvu->rsrc_lock); 5697 if (rc) { 5698 dev_err(rvu->dev, 5699 "%s: Failed to fetch context of leaf profile %d\n", 5700 __func__, leaf_prof); 5701 return; 5702 } 5703 5704 if (!aq_rsp.prof.hl_en) 5705 return; 5706 5707 mid_prof = aq_rsp.prof.band_prof_id; 5708 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5709 ipolicer->ref_count[mid_prof]--; 5710 /* If ref_count is zero, free mid layer profile */ 5711 if (!ipolicer->ref_count[mid_prof]) { 5712 ipolicer->pfvf_map[mid_prof] = 0x00; 5713 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 5714 } 5715 } 5716 5717 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 5718 struct nix_bandprof_get_hwinfo_rsp *rsp) 5719 { 5720 struct nix_ipolicer *ipolicer; 5721 int blkaddr, layer, err; 5722 struct nix_hw *nix_hw; 5723 u64 tu; 5724 5725 if (!rvu->hw->cap.ipolicer) 5726 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5727 5728 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 5729 if (err) 5730 return err; 5731 5732 /* Return number of bandwidth profiles free at each layer */ 5733 mutex_lock(&rvu->rsrc_lock); 5734 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5735 if (layer == BAND_PROF_INVAL_LAYER) 5736 continue; 5737 5738 ipolicer = &nix_hw->ipolicer[layer]; 5739 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 5740 } 5741 mutex_unlock(&rvu->rsrc_lock); 5742 5743 /* Set the policer timeunit in nanosec */ 5744 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 5745 rsp->policer_timeunit = (tu + 1) * 100; 5746 5747 return 0; 5748 } 5749