1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 11 #include "rvu_struct.h" 12 #include "rvu_reg.h" 13 #include "rvu.h" 14 #include "npc.h" 15 #include "mcs.h" 16 #include "cgx.h" 17 #include "lmac_common.h" 18 #include "rvu_npc_hash.h" 19 20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 22 int type, int chan_id); 23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 24 int type, bool add); 25 static int nix_setup_ipolicers(struct rvu *rvu, 26 struct nix_hw *nix_hw, int blkaddr); 27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); 28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 29 struct nix_hw *nix_hw, u16 pcifunc); 30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 32 u32 leaf_prof); 33 static const char *nix_get_ctx_name(int ctype); 34 35 enum mc_tbl_sz { 36 MC_TBL_SZ_256, 37 MC_TBL_SZ_512, 38 MC_TBL_SZ_1K, 39 MC_TBL_SZ_2K, 40 MC_TBL_SZ_4K, 41 MC_TBL_SZ_8K, 42 MC_TBL_SZ_16K, 43 MC_TBL_SZ_32K, 44 MC_TBL_SZ_64K, 45 }; 46 47 enum mc_buf_cnt { 48 MC_BUF_CNT_8, 49 MC_BUF_CNT_16, 50 MC_BUF_CNT_32, 51 MC_BUF_CNT_64, 52 MC_BUF_CNT_128, 53 MC_BUF_CNT_256, 54 MC_BUF_CNT_512, 55 MC_BUF_CNT_1024, 56 MC_BUF_CNT_2048, 57 }; 58 59 enum nix_makr_fmt_indexes { 60 NIX_MARK_CFG_IP_DSCP_RED, 61 NIX_MARK_CFG_IP_DSCP_YELLOW, 62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 63 NIX_MARK_CFG_IP_ECN_RED, 64 NIX_MARK_CFG_IP_ECN_YELLOW, 65 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 66 NIX_MARK_CFG_VLAN_DEI_RED, 67 NIX_MARK_CFG_VLAN_DEI_YELLOW, 68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 69 NIX_MARK_CFG_MAX, 70 }; 71 72 /* For now considering MC resources needed for broadcast 73 * pkt replication only. i.e 256 HWVFs + 12 PFs. 
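 * With MC_TBL_SZ_512 selected below, the table has room for one
 * replication entry per PF/VF (268 in total), with headroom to spare.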
74 */ 75 #define MC_TBL_SIZE MC_TBL_SZ_512 76 #define MC_BUF_CNT MC_BUF_CNT_128 77 78 struct mce { 79 struct hlist_node node; 80 u16 pcifunc; 81 }; 82 83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 84 { 85 int i = 0; 86 87 /*If blkaddr is 0, return the first nix block address*/ 88 if (blkaddr == 0) 89 return rvu->nix_blkaddr[blkaddr]; 90 91 while (i + 1 < MAX_NIX_BLKS) { 92 if (rvu->nix_blkaddr[i] == blkaddr) 93 return rvu->nix_blkaddr[i + 1]; 94 i++; 95 } 96 97 return 0; 98 } 99 100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 101 { 102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 103 int blkaddr; 104 105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 106 if (!pfvf->nixlf || blkaddr < 0) 107 return false; 108 return true; 109 } 110 111 int rvu_get_nixlf_count(struct rvu *rvu) 112 { 113 int blkaddr = 0, max = 0; 114 struct rvu_block *block; 115 116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 117 while (blkaddr) { 118 block = &rvu->hw->block[blkaddr]; 119 max += block->lf.max; 120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 121 } 122 return max; 123 } 124 125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 126 { 127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 128 struct rvu_hwinfo *hw = rvu->hw; 129 int blkaddr; 130 131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 132 if (!pfvf->nixlf || blkaddr < 0) 133 return NIX_AF_ERR_AF_LF_INVALID; 134 135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 136 if (*nixlf < 0) 137 return NIX_AF_ERR_AF_LF_INVALID; 138 139 if (nix_blkaddr) 140 *nix_blkaddr = blkaddr; 141 142 return 0; 143 } 144 145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 146 struct nix_hw **nix_hw, int *blkaddr) 147 { 148 struct rvu_pfvf *pfvf; 149 150 pfvf = rvu_get_pfvf(rvu, pcifunc); 151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 152 if (!pfvf->nixlf || *blkaddr < 0) 153 return NIX_AF_ERR_AF_LF_INVALID; 154 155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 156 if (!*nix_hw) 157 return NIX_AF_ERR_INVALID_NIXBLK; 158 return 0; 159 } 160 161 static void nix_mce_list_init(struct nix_mce_list *list, int max) 162 { 163 INIT_HLIST_HEAD(&list->head); 164 list->count = 0; 165 list->max = max; 166 } 167 168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 169 { 170 int idx; 171 172 if (!mcast) 173 return 0; 174 175 idx = mcast->next_free_mce; 176 mcast->next_free_mce += count; 177 return idx; 178 } 179 180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 181 { 182 int nix_blkaddr = 0, i = 0; 183 struct rvu *rvu = hw->rvu; 184 185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 186 while (nix_blkaddr) { 187 if (blkaddr == nix_blkaddr && hw->nix) 188 return &hw->nix[i]; 189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 190 i++; 191 } 192 return NULL; 193 } 194 195 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) 196 { 197 if (hw->cap.nix_multiple_dwrr_mtu) 198 return NIX_AF_DWRR_MTUX(smq_link_type); 199 200 if (smq_link_type == SMQ_LINK_TYPE_SDP) 201 return NIX_AF_DWRR_SDP_MTU; 202 203 /* Here it's same reg for RPM and LBK */ 204 return NIX_AF_DWRR_RPM_MTU; 205 } 206 207 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) 208 { 209 dwrr_mtu &= 0x1FULL; 210 211 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 212 * Value of 4 is reserved for MTU value of 9728 bytes. 213 * Value of 5 is reserved for MTU value of 10240 bytes. 
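 * e.g. dwrr_mtu = 10 decodes to 2^10 = 1024 bytes; the reserved values
 * 4 and 5 map to 9728 and 10240 bytes in the switch below.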
214 */ 215 switch (dwrr_mtu) { 216 case 4: 217 return 9728; 218 case 5: 219 return 10240; 220 default: 221 return BIT_ULL(dwrr_mtu); 222 } 223 224 return 0; 225 } 226 227 u32 convert_bytes_to_dwrr_mtu(u32 bytes) 228 { 229 /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. 230 * Value of 4 is reserved for MTU value of 9728 bytes. 231 * Value of 5 is reserved for MTU value of 10240 bytes. 232 */ 233 if (bytes > BIT_ULL(16)) 234 return 0; 235 236 switch (bytes) { 237 case 9728: 238 return 4; 239 case 10240: 240 return 5; 241 default: 242 return ilog2(bytes); 243 } 244 245 return 0; 246 } 247 248 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 249 { 250 int err; 251 252 /* Sync all in flight RX packets to LLC/DRAM */ 253 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 254 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 255 if (err) 256 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); 257 258 /* SW_SYNC ensures all existing transactions are finished and pkts 259 * are written to LLC/DRAM, queues should be torn down after 260 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios 261 * an existing transaction might end after SW_SYNC operation. To 262 * ensure operation is fully done, do the SW_SYNC twice. 263 */ 264 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 265 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 266 if (err) 267 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); 268 } 269 270 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 271 int lvl, u16 pcifunc, u16 schq) 272 { 273 struct rvu_hwinfo *hw = rvu->hw; 274 struct nix_txsch *txsch; 275 struct nix_hw *nix_hw; 276 u16 map_func; 277 278 nix_hw = get_nix_hw(rvu->hw, blkaddr); 279 if (!nix_hw) 280 return false; 281 282 txsch = &nix_hw->txsch[lvl]; 283 /* Check out of bounds */ 284 if (schq >= txsch->schq.max) 285 return false; 286 287 mutex_lock(&rvu->rsrc_lock); 288 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 289 mutex_unlock(&rvu->rsrc_lock); 290 291 /* TLs aggregating traffic are shared across PF and VFs */ 292 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 293 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 294 return false; 295 else 296 return true; 297 } 298 299 if (map_func != pcifunc) 300 return false; 301 302 return true; 303 } 304 305 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, 306 struct nix_lf_alloc_rsp *rsp, bool loop) 307 { 308 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); 309 u16 req_chan_base, req_chan_end, req_chan_cnt; 310 struct rvu_hwinfo *hw = rvu->hw; 311 struct sdp_node_info *sdp_info; 312 int pkind, pf, vf, lbkid, vfid; 313 u8 cgx_id, lmac_id; 314 bool from_vf; 315 int err; 316 317 pf = rvu_get_pf(pcifunc); 318 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 319 type != NIX_INTF_TYPE_SDP) 320 return 0; 321 322 switch (type) { 323 case NIX_INTF_TYPE_CGX: 324 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 325 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 326 327 pkind = rvu_npc_get_pkind(rvu, pf); 328 if (pkind < 0) { 329 dev_err(rvu->dev, 330 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 331 return -EINVAL; 332 } 333 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 334 pfvf->tx_chan_base = pfvf->rx_chan_base; 335 pfvf->rx_chan_cnt = 1; 336 pfvf->tx_chan_cnt = 1; 337 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; 338 339 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 340
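/* Keep NPC's notion of the assigned pkind in sync with what was just
 * programmed on the LMAC via cgx_set_pkind().
 */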
rvu_npc_set_pkind(rvu, pkind, pfvf); 341 342 break; 343 case NIX_INTF_TYPE_LBK: 344 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 345 346 /* If NIX1 block is present on the silicon then NIXes are 347 * assigned alternately for lbk interfaces. NIX0 should 348 * send packets on lbk link 1 channels and NIX1 should send 349 * on lbk link 0 channels for the communication between 350 * NIX0 and NIX1. 351 */ 352 lbkid = 0; 353 if (rvu->hw->lbk_links > 1) 354 lbkid = vf & 0x1 ? 0 : 1; 355 356 /* By default NIX0 is configured to send packet on lbk link 1 357 * (which corresponds to LBK1), same packet will receive on 358 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 359 * (which corresponds to LBK2) packet will receive on NIX0 lbk 360 * link 1. 361 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0 362 * transmits and receives on lbk link 0, which corresponds 363 * to LBK1 block, back to back connectivity between NIX and 364 * LBK can be achieved (which is similar to 96xx) 365 * 366 * RX TX 367 * NIX0 lbk link 1 (LBK2) 1 (LBK1) 368 * NIX0 lbk link 0 (LBK0) 0 (LBK0) 369 * NIX1 lbk link 0 (LBK1) 0 (LBK2) 370 * NIX1 lbk link 1 (LBK3) 1 (LBK3) 371 */ 372 if (loop) 373 lbkid = !lbkid; 374 375 /* Note that AF's VFs work in pairs and talk over consecutive 376 * loopback channels. Therefore if an odd number of AF VFs are 377 * enabled then the last VF remains with no pair. 378 */ 379 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 380 pfvf->tx_chan_base = vf & 0x1 ? 381 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 382 rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 383 pfvf->rx_chan_cnt = 1; 384 pfvf->tx_chan_cnt = 1; 385 rsp->tx_link = hw->cgx_links + lbkid; 386 pfvf->lbkid = lbkid; 387 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); 388 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 389 pfvf->rx_chan_base, 390 pfvf->rx_chan_cnt); 391 392 break; 393 case NIX_INTF_TYPE_SDP: 394 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 395 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; 396 sdp_info = parent_pf->sdp_info; 397 if (!sdp_info) { 398 dev_err(rvu->dev, "Invalid sdp_info pointer\n"); 399 return -EINVAL; 400 } 401 if (from_vf) { 402 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + 403 sdp_info->num_pf_rings; 404 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 405 for (vfid = 0; vfid < vf; vfid++) 406 req_chan_base += sdp_info->vf_rings[vfid]; 407 req_chan_cnt = sdp_info->vf_rings[vf]; 408 req_chan_end = req_chan_base + req_chan_cnt - 1; 409 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || 410 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { 411 dev_err(rvu->dev, 412 "PF_Func 0x%x: Invalid channel base and count\n", 413 pcifunc); 414 return -EINVAL; 415 } 416 } else { 417 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; 418 req_chan_cnt = sdp_info->num_pf_rings; 419 } 420 421 pfvf->rx_chan_base = req_chan_base; 422 pfvf->rx_chan_cnt = req_chan_cnt; 423 pfvf->tx_chan_base = pfvf->rx_chan_base; 424 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; 425 426 rsp->tx_link = hw->cgx_links + hw->lbk_links; 427 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 428 pfvf->rx_chan_base, 429 pfvf->rx_chan_cnt); 430 break; 431 } 432 433 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 434 * RVU PF/VF's MAC address.
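 * Together with the bcast replication (MCE) entry and the bcast MCAM
 * rule installed below, this forms the LF's default RX rule set.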
435 */ 436 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 437 pfvf->rx_chan_base, pfvf->mac_addr); 438 439 /* Add this PF_FUNC to bcast pkt replication list */ 440 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 441 if (err) { 442 dev_err(rvu->dev, 443 "Bcast list, failed to enable PF_FUNC 0x%x\n", 444 pcifunc); 445 return err; 446 } 447 /* Install MCAM rule matching Ethernet broadcast mac address */ 448 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 449 nixlf, pfvf->rx_chan_base); 450 451 pfvf->maxlen = NIC_HW_MIN_FRS; 452 pfvf->minlen = NIC_HW_MIN_FRS; 453 454 return 0; 455 } 456 457 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 458 { 459 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 460 int err; 461 462 pfvf->maxlen = 0; 463 pfvf->minlen = 0; 464 465 /* Remove this PF_FUNC from bcast pkt replication list */ 466 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 467 if (err) { 468 dev_err(rvu->dev, 469 "Bcast list, failed to disable PF_FUNC 0x%x\n", 470 pcifunc); 471 } 472 473 /* Free and disable any MCAM entries used by this NIX LF */ 474 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 475 476 /* Disable DMAC filters used */ 477 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 478 } 479 480 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 481 struct nix_bp_cfg_req *req, 482 struct msg_rsp *rsp) 483 { 484 u16 pcifunc = req->hdr.pcifunc; 485 struct rvu_pfvf *pfvf; 486 int blkaddr, pf, type; 487 u16 chan_base, chan; 488 u64 cfg; 489 490 pf = rvu_get_pf(pcifunc); 491 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 492 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 493 return 0; 494 495 pfvf = rvu_get_pfvf(rvu, pcifunc); 496 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 497 498 chan_base = pfvf->rx_chan_base + req->chan_base; 499 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 500 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 501 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 502 cfg & ~BIT_ULL(16)); 503 } 504 return 0; 505 } 506 507 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 508 int type, int chan_id) 509 { 510 int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; 511 u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; 512 struct rvu_hwinfo *hw = rvu->hw; 513 struct rvu_pfvf *pfvf; 514 u8 cgx_id, lmac_id; 515 u64 cfg; 516 517 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 518 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 519 lmac_chan_cnt = cfg & 0xFF; 520 521 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 522 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 523 524 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 525 sdp_chan_cnt = cfg & 0xFFF; 526 sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; 527 528 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 529 530 /* Backpressure IDs range division 531 * CGX channels are mapped to (0 - 191) BPIDs 532 * LBK channels are mapped to (192 - 255) BPIDs 533 * SDP channels are mapped to (256 - 511) BPIDs 534 * 535 * Lmac channels and bpids are mapped as follows 536 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 537 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 538 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
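 * e.g. with 4 LMACs per CGX and 16 channels per LMAC as above,
 * cgx(1)_lmac(2)_chan(3) maps to bpid 1*64 + 2*16 + 3 = 99.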
539 */ 540 switch (type) { 541 case NIX_INTF_TYPE_CGX: 542 if ((req->chan_base + req->chan_cnt) > 16) 543 return -EINVAL; 544 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 545 /* Assign bpid based on cgx, lmac and chan id */ 546 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 547 (lmac_id * lmac_chan_cnt) + req->chan_base; 548 549 if (req->bpid_per_chan) 550 bpid += chan_id; 551 if (bpid > cgx_bpid_cnt) 552 return -EINVAL; 553 break; 554 555 case NIX_INTF_TYPE_LBK: 556 if ((req->chan_base + req->chan_cnt) > 63) 557 return -EINVAL; 558 bpid = cgx_bpid_cnt + req->chan_base; 559 if (req->bpid_per_chan) 560 bpid += chan_id; 561 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 562 return -EINVAL; 563 break; 564 case NIX_INTF_TYPE_SDP: 565 if ((req->chan_base + req->chan_cnt) > 255) 566 return -EINVAL; 567 568 bpid = sdp_bpid_cnt + req->chan_base; 569 if (req->bpid_per_chan) 570 bpid += chan_id; 571 572 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) 573 return -EINVAL; 574 break; 575 default: 576 return -EINVAL; 577 } 578 return bpid; 579 } 580 581 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 582 struct nix_bp_cfg_req *req, 583 struct nix_bp_cfg_rsp *rsp) 584 { 585 int blkaddr, pf, type, chan_id = 0; 586 u16 pcifunc = req->hdr.pcifunc; 587 struct rvu_pfvf *pfvf; 588 u16 chan_base, chan; 589 s16 bpid, bpid_base; 590 u64 cfg; 591 592 pf = rvu_get_pf(pcifunc); 593 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 594 if (is_sdp_pfvf(pcifunc)) 595 type = NIX_INTF_TYPE_SDP; 596 597 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ 598 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && 599 type != NIX_INTF_TYPE_SDP) 600 return 0; 601 602 pfvf = rvu_get_pfvf(rvu, pcifunc); 603 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 604 605 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 606 chan_base = pfvf->rx_chan_base + req->chan_base; 607 bpid = bpid_base; 608 609 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 610 if (bpid < 0) { 611 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 612 return -EINVAL; 613 } 614 615 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 616 cfg &= ~GENMASK_ULL(8, 0); 617 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 618 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); 619 chan_id++; 620 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 621 } 622 623 for (chan = 0; chan < req->chan_cnt; chan++) { 624 /* Map channel and bpid assign to it */ 625 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 626 (bpid_base & 0x3FF); 627 if (req->bpid_per_chan) 628 bpid_base++; 629 } 630 rsp->chan_cnt = req->chan_cnt; 631 632 return 0; 633 } 634 635 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 636 u64 format, bool v4, u64 *fidx) 637 { 638 struct nix_lso_format field = {0}; 639 640 /* IP's Length field */ 641 field.layer = NIX_TXLAYER_OL3; 642 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 643 field.offset = v4 ? 
2 : 4; 644 field.sizem1 = 1; /* i.e 2 bytes */ 645 field.alg = NIX_LSOALG_ADD_PAYLEN; 646 rvu_write64(rvu, blkaddr, 647 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 648 *(u64 *)&field); 649 650 /* No ID field in IPv6 header */ 651 if (!v4) 652 return; 653 654 /* IP's ID field */ 655 field.layer = NIX_TXLAYER_OL3; 656 field.offset = 4; 657 field.sizem1 = 1; /* i.e 2 bytes */ 658 field.alg = NIX_LSOALG_ADD_SEGNUM; 659 rvu_write64(rvu, blkaddr, 660 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 661 *(u64 *)&field); 662 } 663 664 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 665 u64 format, u64 *fidx) 666 { 667 struct nix_lso_format field = {0}; 668 669 /* TCP's sequence number field */ 670 field.layer = NIX_TXLAYER_OL4; 671 field.offset = 4; 672 field.sizem1 = 3; /* i.e 4 bytes */ 673 field.alg = NIX_LSOALG_ADD_OFFSET; 674 rvu_write64(rvu, blkaddr, 675 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 676 *(u64 *)&field); 677 678 /* TCP's flags field */ 679 field.layer = NIX_TXLAYER_OL4; 680 field.offset = 12; 681 field.sizem1 = 1; /* 2 bytes */ 682 field.alg = NIX_LSOALG_TCP_FLAGS; 683 rvu_write64(rvu, blkaddr, 684 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 685 *(u64 *)&field); 686 } 687 688 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 689 { 690 u64 cfg, idx, fidx = 0; 691 692 /* Get max HW supported format indices */ 693 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 694 nix_hw->lso.total = cfg; 695 696 /* Enable LSO */ 697 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 698 /* For TSO, set first and middle segment flags to 699 * mask out PSH, RST & FIN flags in TCP packet 700 */ 701 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 702 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 703 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 704 705 /* Setup default static LSO formats 706 * 707 * Configure format fields for TCPv4 segmentation offload 708 */ 709 idx = NIX_LSO_FORMAT_IDX_TSOV4; 710 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 711 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 712 713 /* Set rest of the fields to NOP */ 714 for (; fidx < 8; fidx++) { 715 rvu_write64(rvu, blkaddr, 716 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 717 } 718 nix_hw->lso.in_use++; 719 720 /* Configure format fields for TCPv6 segmentation offload */ 721 idx = NIX_LSO_FORMAT_IDX_TSOV6; 722 fidx = 0; 723 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 724 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 725 726 /* Set rest of the fields to NOP */ 727 for (; fidx < 8; fidx++) { 728 rvu_write64(rvu, blkaddr, 729 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 730 } 731 nix_hw->lso.in_use++; 732 } 733 734 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 735 { 736 kfree(pfvf->rq_bmap); 737 kfree(pfvf->sq_bmap); 738 kfree(pfvf->cq_bmap); 739 if (pfvf->rq_ctx) 740 qmem_free(rvu->dev, pfvf->rq_ctx); 741 if (pfvf->sq_ctx) 742 qmem_free(rvu->dev, pfvf->sq_ctx); 743 if (pfvf->cq_ctx) 744 qmem_free(rvu->dev, pfvf->cq_ctx); 745 if (pfvf->rss_ctx) 746 qmem_free(rvu->dev, pfvf->rss_ctx); 747 if (pfvf->nix_qints_ctx) 748 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 749 if (pfvf->cq_ints_ctx) 750 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 751 752 pfvf->rq_bmap = NULL; 753 pfvf->cq_bmap = NULL; 754 pfvf->sq_bmap = NULL; 755 pfvf->rq_ctx = NULL; 756 pfvf->sq_ctx = NULL; 757 pfvf->cq_ctx = NULL; 758 pfvf->rss_ctx = NULL; 759 pfvf->nix_qints_ctx = NULL; 760 pfvf->cq_ints_ctx = NULL; 761 } 762 763 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 764 struct rvu_pfvf *pfvf, int nixlf, 765 int rss_sz, int rss_grps, int hwctx_size, 766 u64 way_mask, bool tag_lsb_as_adder) 767 { 768 int err, grp, num_indices; 769 u64 val; 770 771 /* RSS is not requested for this NIXLF */ 772 if (!rss_sz) 773 return 0; 774 num_indices = rss_sz * rss_grps; 775 776 /* Alloc NIX RSS HW context memory and config the base */ 777 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 778 if (err) 779 return err; 780 781 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 782 (u64)pfvf->rss_ctx->iova); 783 784 /* Config full RSS table size, enable RSS and caching */ 785 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | 786 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); 787 788 if (tag_lsb_as_adder) 789 val |= BIT_ULL(5); 790 791 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); 792 /* Config RSS group offset and sizes */ 793 for (grp = 0; grp < rss_grps; grp++) 794 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 795 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 796 return 0; 797 } 798 799 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 800 struct nix_aq_inst_s *inst) 801 { 802 struct admin_queue *aq = block->aq; 803 struct nix_aq_res_s *result; 804 int timeout = 1000; 805 u64 reg, head; 806 int ret; 807 808 result = (struct nix_aq_res_s *)aq->res->base; 809 810 /* Get current head pointer where to append this instruction */ 811 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 812 head = (reg >> 4) & AQ_PTR_MASK; 813 814 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 815 (void *)inst, aq->inst->entry_sz); 816 memset(result, 0, sizeof(*result)); 817 /* sync into memory */ 818 wmb(); 819 820 /* Ring the doorbell and wait for result */ 821 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 822 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 823 cpu_relax(); 824 udelay(1); 825 timeout--; 826 if (!timeout) 827 return -EBUSY; 828 } 829 830 if (result->compcode != NIX_AQ_COMP_GOOD) { 831 /* TODO: Replace this with some error code */ 832 if (result->compcode == NIX_AQ_COMP_CTX_FAULT || 833 result->compcode == NIX_AQ_COMP_LOCKERR || 834 result->compcode == NIX_AQ_COMP_CTX_POISON) { 835 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); 836 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); 837 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); 838 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); 839 if (ret) 840 dev_err(rvu->dev, 841 "%s: Not able to unlock cachelines\n", __func__); 842 } 843 844 return -EBUSY; 845 } 846 847 return 0; 848 } 849 850 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, 851 u16 *smq, u16 *smq_mask) 852 { 853 struct nix_cn10k_aq_enq_req *aq_req; 854 855 if (!is_rvu_otx2(rvu)) { 856 aq_req = (struct nix_cn10k_aq_enq_req *)req; 857 *smq = aq_req->sq.smq; 858 *smq_mask = aq_req->sq_mask.smq; 859 } else { 860 *smq = req->sq.smq; 861 *smq_mask = req->sq_mask.smq; 862 } 863 } 864 865 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 866 struct nix_aq_enq_req *req, 867 struct nix_aq_enq_rsp *rsp) 868 { 869 struct rvu_hwinfo *hw = rvu->hw; 870 u16 pcifunc = req->hdr.pcifunc; 871 int nixlf, blkaddr, rc = 0; 872 struct nix_aq_inst_s inst; 873 struct rvu_block *block; 874 struct admin_queue *aq; 875 struct rvu_pfvf *pfvf; 876 u16 smq, smq_mask; 877 void *ctx, *mask; 878 bool ena; 879 u64 cfg; 880 881 blkaddr = nix_hw->blkaddr; 
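/* All context INIT/WRITE/READ/LOCK requests below are serviced through
 * this block's admin queue; bail out if it was never initialized.
 */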
882 block = &hw->block[blkaddr]; 883 aq = block->aq; 884 if (!aq) { 885 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 886 return NIX_AF_ERR_AQ_ENQUEUE; 887 } 888 889 pfvf = rvu_get_pfvf(rvu, pcifunc); 890 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 891 892 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 893 * operations done by AF itself. 894 */ 895 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 896 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 897 if (!pfvf->nixlf || nixlf < 0) 898 return NIX_AF_ERR_AF_LF_INVALID; 899 } 900 901 switch (req->ctype) { 902 case NIX_AQ_CTYPE_RQ: 903 /* Check if index exceeds max no of queues */ 904 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 905 rc = NIX_AF_ERR_AQ_ENQUEUE; 906 break; 907 case NIX_AQ_CTYPE_SQ: 908 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 909 rc = NIX_AF_ERR_AQ_ENQUEUE; 910 break; 911 case NIX_AQ_CTYPE_CQ: 912 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 913 rc = NIX_AF_ERR_AQ_ENQUEUE; 914 break; 915 case NIX_AQ_CTYPE_RSS: 916 /* Check if RSS is enabled and qidx is within range */ 917 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 918 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 919 (req->qidx >= (256UL << (cfg & 0xF)))) 920 rc = NIX_AF_ERR_AQ_ENQUEUE; 921 break; 922 case NIX_AQ_CTYPE_MCE: 923 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 924 925 /* Check if index exceeds MCE list length */ 926 if (!nix_hw->mcast.mce_ctx || 927 (req->qidx >= (256UL << (cfg & 0xF)))) 928 rc = NIX_AF_ERR_AQ_ENQUEUE; 929 930 /* Adding multicast lists for requests from PF/VFs is not 931 * yet supported, so ignore this. 932 */ 933 if (rsp) 934 rc = NIX_AF_ERR_AQ_ENQUEUE; 935 break; 936 case NIX_AQ_CTYPE_BANDPROF: 937 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 938 nix_hw, pcifunc)) 939 rc = NIX_AF_ERR_INVALID_BANDPROF; 940 break; 941 default: 942 rc = NIX_AF_ERR_AQ_ENQUEUE; 943 } 944 945 if (rc) 946 return rc; 947 948 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); 949 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 950 if (req->ctype == NIX_AQ_CTYPE_SQ && 951 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 952 (req->op == NIX_AQ_INSTOP_WRITE && 953 req->sq_mask.ena && req->sq.ena && smq_mask))) { 954 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 955 pcifunc, smq)) 956 return NIX_AF_ERR_AQ_ENQUEUE; 957 } 958 959 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 960 inst.lf = nixlf; 961 inst.cindex = req->qidx; 962 inst.ctype = req->ctype; 963 inst.op = req->op; 964 /* Currently we are not supporting enqueuing multiple instructions, 965 * so always choose first entry in result memory. 966 */ 967 inst.res_addr = (u64)aq->res->iova; 968 969 /* Hardware uses same aq->res->base for updating result of 970 * previous instruction hence wait here till it is done. 
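 * The aq->lock taken below serializes submitters so the shared result
 * area is not overwritten before it has been consumed.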
971 */ 972 spin_lock(&aq->lock); 973 974 /* Clean result + context memory */ 975 memset(aq->res->base, 0, aq->res->entry_sz); 976 /* Context needs to be written at RES_ADDR + 128 */ 977 ctx = aq->res->base + 128; 978 /* Mask needs to be written at RES_ADDR + 256 */ 979 mask = aq->res->base + 256; 980 981 switch (req->op) { 982 case NIX_AQ_INSTOP_WRITE: 983 if (req->ctype == NIX_AQ_CTYPE_RQ) 984 memcpy(mask, &req->rq_mask, 985 sizeof(struct nix_rq_ctx_s)); 986 else if (req->ctype == NIX_AQ_CTYPE_SQ) 987 memcpy(mask, &req->sq_mask, 988 sizeof(struct nix_sq_ctx_s)); 989 else if (req->ctype == NIX_AQ_CTYPE_CQ) 990 memcpy(mask, &req->cq_mask, 991 sizeof(struct nix_cq_ctx_s)); 992 else if (req->ctype == NIX_AQ_CTYPE_RSS) 993 memcpy(mask, &req->rss_mask, 994 sizeof(struct nix_rsse_s)); 995 else if (req->ctype == NIX_AQ_CTYPE_MCE) 996 memcpy(mask, &req->mce_mask, 997 sizeof(struct nix_rx_mce_s)); 998 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 999 memcpy(mask, &req->prof_mask, 1000 sizeof(struct nix_bandprof_s)); 1001 fallthrough; 1002 case NIX_AQ_INSTOP_INIT: 1003 if (req->ctype == NIX_AQ_CTYPE_RQ) 1004 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 1005 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1006 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 1007 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1008 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 1009 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1010 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 1011 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1012 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 1013 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1014 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 1015 break; 1016 case NIX_AQ_INSTOP_NOP: 1017 case NIX_AQ_INSTOP_READ: 1018 case NIX_AQ_INSTOP_LOCK: 1019 case NIX_AQ_INSTOP_UNLOCK: 1020 break; 1021 default: 1022 rc = NIX_AF_ERR_AQ_ENQUEUE; 1023 spin_unlock(&aq->lock); 1024 return rc; 1025 } 1026 1027 /* Submit the instruction to AQ */ 1028 rc = nix_aq_enqueue_wait(rvu, block, &inst); 1029 if (rc) { 1030 spin_unlock(&aq->lock); 1031 return rc; 1032 } 1033 1034 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 1035 if (req->op == NIX_AQ_INSTOP_INIT) { 1036 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 1037 __set_bit(req->qidx, pfvf->rq_bmap); 1038 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 1039 __set_bit(req->qidx, pfvf->sq_bmap); 1040 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 1041 __set_bit(req->qidx, pfvf->cq_bmap); 1042 } 1043 1044 if (req->op == NIX_AQ_INSTOP_WRITE) { 1045 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1046 ena = (req->rq.ena & req->rq_mask.ena) | 1047 (test_bit(req->qidx, pfvf->rq_bmap) & 1048 ~req->rq_mask.ena); 1049 if (ena) 1050 __set_bit(req->qidx, pfvf->rq_bmap); 1051 else 1052 __clear_bit(req->qidx, pfvf->rq_bmap); 1053 } 1054 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1055 ena = (req->rq.ena & req->sq_mask.ena) | 1056 (test_bit(req->qidx, pfvf->sq_bmap) & 1057 ~req->sq_mask.ena); 1058 if (ena) 1059 __set_bit(req->qidx, pfvf->sq_bmap); 1060 else 1061 __clear_bit(req->qidx, pfvf->sq_bmap); 1062 } 1063 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1064 ena = (req->rq.ena & req->cq_mask.ena) | 1065 (test_bit(req->qidx, pfvf->cq_bmap) & 1066 ~req->cq_mask.ena); 1067 if (ena) 1068 __set_bit(req->qidx, pfvf->cq_bmap); 1069 else 1070 __clear_bit(req->qidx, pfvf->cq_bmap); 1071 } 1072 } 1073 1074 if (rsp) { 1075 /* Copy read context into mailbox */ 1076 if (req->op == NIX_AQ_INSTOP_READ) { 1077 if (req->ctype == NIX_AQ_CTYPE_RQ) 1078 memcpy(&rsp->rq, 
ctx, 1079 sizeof(struct nix_rq_ctx_s)); 1080 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1081 memcpy(&rsp->sq, ctx, 1082 sizeof(struct nix_sq_ctx_s)); 1083 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1084 memcpy(&rsp->cq, ctx, 1085 sizeof(struct nix_cq_ctx_s)); 1086 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1087 memcpy(&rsp->rss, ctx, 1088 sizeof(struct nix_rsse_s)); 1089 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1090 memcpy(&rsp->mce, ctx, 1091 sizeof(struct nix_rx_mce_s)); 1092 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1093 memcpy(&rsp->prof, ctx, 1094 sizeof(struct nix_bandprof_s)); 1095 } 1096 } 1097 1098 spin_unlock(&aq->lock); 1099 return 0; 1100 } 1101 1102 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, 1103 struct nix_aq_enq_req *req, u8 ctype) 1104 { 1105 struct nix_cn10k_aq_enq_req aq_req; 1106 struct nix_cn10k_aq_enq_rsp aq_rsp; 1107 int rc, word; 1108 1109 if (req->ctype != NIX_AQ_CTYPE_CQ) 1110 return 0; 1111 1112 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 1113 req->hdr.pcifunc, ctype, req->qidx); 1114 if (rc) { 1115 dev_err(rvu->dev, 1116 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", 1117 __func__, nix_get_ctx_name(ctype), req->qidx, 1118 req->hdr.pcifunc); 1119 return rc; 1120 } 1121 1122 /* Make copy of original context & mask which are required 1123 * for resubmission 1124 */ 1125 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); 1126 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); 1127 1128 /* exclude fields which HW can update */ 1129 aq_req.cq_mask.cq_err = 0; 1130 aq_req.cq_mask.wrptr = 0; 1131 aq_req.cq_mask.tail = 0; 1132 aq_req.cq_mask.head = 0; 1133 aq_req.cq_mask.avg_level = 0; 1134 aq_req.cq_mask.update_time = 0; 1135 aq_req.cq_mask.substream = 0; 1136 1137 /* Context mask (cq_mask) holds mask value of fields which 1138 * are changed in AQ WRITE operation. 1139 * for example cq.drop = 0xa; 1140 * cq_mask.drop = 0xff; 1141 * Below logic performs '&' between cq and cq_mask so that non 1142 * updated fields are masked out for request and response 1143 * comparison 1144 */ 1145 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); 1146 word++) { 1147 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= 1148 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1149 *(u64 *)((u8 *)&aq_req.cq + word * 8) &= 1150 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1151 } 1152 1153 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) 1154 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; 1155 1156 return 0; 1157 } 1158 1159 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1160 struct nix_aq_enq_rsp *rsp) 1161 { 1162 struct nix_hw *nix_hw; 1163 int err, retries = 5; 1164 int blkaddr; 1165 1166 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 1167 if (blkaddr < 0) 1168 return NIX_AF_ERR_AF_LF_INVALID; 1169 1170 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1171 if (!nix_hw) 1172 return NIX_AF_ERR_INVALID_NIXBLK; 1173 1174 retry: 1175 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 1176 1177 /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' 1178 * As a workaround, perform a CQ context read after each AQ write. If the AQ 1179 * read shows the AQ write is not updated, perform the AQ write again.
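 * The retry below is bounded (see 'retries') and finally fails with
 * NIX_AF_ERR_CQ_CTX_WRITE_ERR if the CQ context still does not stick.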
1180 */ 1181 if (!err && req->op == NIX_AQ_INSTOP_WRITE) { 1182 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); 1183 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { 1184 if (retries--) 1185 goto retry; 1186 else 1187 return NIX_AF_ERR_CQ_CTX_WRITE_ERR; 1188 } 1189 } 1190 1191 return err; 1192 } 1193 1194 static const char *nix_get_ctx_name(int ctype) 1195 { 1196 switch (ctype) { 1197 case NIX_AQ_CTYPE_CQ: 1198 return "CQ"; 1199 case NIX_AQ_CTYPE_SQ: 1200 return "SQ"; 1201 case NIX_AQ_CTYPE_RQ: 1202 return "RQ"; 1203 case NIX_AQ_CTYPE_RSS: 1204 return "RSS"; 1205 } 1206 return ""; 1207 } 1208 1209 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 1210 { 1211 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 1212 struct nix_aq_enq_req aq_req; 1213 unsigned long *bmap; 1214 int qidx, q_cnt = 0; 1215 int err = 0, rc; 1216 1217 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 1218 return NIX_AF_ERR_AQ_ENQUEUE; 1219 1220 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 1221 aq_req.hdr.pcifunc = req->hdr.pcifunc; 1222 1223 if (req->ctype == NIX_AQ_CTYPE_CQ) { 1224 aq_req.cq.ena = 0; 1225 aq_req.cq_mask.ena = 1; 1226 aq_req.cq.bp_ena = 0; 1227 aq_req.cq_mask.bp_ena = 1; 1228 q_cnt = pfvf->cq_ctx->qsize; 1229 bmap = pfvf->cq_bmap; 1230 } 1231 if (req->ctype == NIX_AQ_CTYPE_SQ) { 1232 aq_req.sq.ena = 0; 1233 aq_req.sq_mask.ena = 1; 1234 q_cnt = pfvf->sq_ctx->qsize; 1235 bmap = pfvf->sq_bmap; 1236 } 1237 if (req->ctype == NIX_AQ_CTYPE_RQ) { 1238 aq_req.rq.ena = 0; 1239 aq_req.rq_mask.ena = 1; 1240 q_cnt = pfvf->rq_ctx->qsize; 1241 bmap = pfvf->rq_bmap; 1242 } 1243 1244 aq_req.ctype = req->ctype; 1245 aq_req.op = NIX_AQ_INSTOP_WRITE; 1246 1247 for (qidx = 0; qidx < q_cnt; qidx++) { 1248 if (!test_bit(qidx, bmap)) 1249 continue; 1250 aq_req.qidx = qidx; 1251 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 1252 if (rc) { 1253 err = rc; 1254 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1255 nix_get_ctx_name(req->ctype), qidx); 1256 } 1257 } 1258 1259 return err; 1260 } 1261 1262 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1263 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1264 { 1265 struct nix_aq_enq_req lock_ctx_req; 1266 int err; 1267 1268 if (req->op != NIX_AQ_INSTOP_INIT) 1269 return 0; 1270 1271 if (req->ctype == NIX_AQ_CTYPE_MCE || 1272 req->ctype == NIX_AQ_CTYPE_DYNO) 1273 return 0; 1274 1275 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1276 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1277 lock_ctx_req.ctype = req->ctype; 1278 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1279 lock_ctx_req.qidx = req->qidx; 1280 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1281 if (err) 1282 dev_err(rvu->dev, 1283 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1284 req->hdr.pcifunc, 1285 nix_get_ctx_name(req->ctype), req->qidx); 1286 return err; 1287 } 1288 1289 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1290 struct nix_aq_enq_req *req, 1291 struct nix_aq_enq_rsp *rsp) 1292 { 1293 int err; 1294 1295 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1296 if (!err) 1297 err = nix_lf_hwctx_lockdown(rvu, req); 1298 return err; 1299 } 1300 #else 1301 1302 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1303 struct nix_aq_enq_req *req, 1304 struct nix_aq_enq_rsp *rsp) 1305 { 1306 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1307 } 1308 #endif 1309 /* CN10K mbox handler */ 1310 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1311 struct nix_cn10k_aq_enq_req *req, 1312 struct nix_cn10k_aq_enq_rsp *rsp) 1313 { 
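/* CN10K requests reuse the generic AQ enqueue path; silicon-specific
 * fields (e.g. the SQ's SMQ) are picked up via nix_get_aq_req_smq().
 */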
1314 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1315 (struct nix_aq_enq_rsp *)rsp); 1316 } 1317 1318 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1319 struct hwctx_disable_req *req, 1320 struct msg_rsp *rsp) 1321 { 1322 return nix_lf_hwctx_disable(rvu, req); 1323 } 1324 1325 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1326 struct nix_lf_alloc_req *req, 1327 struct nix_lf_alloc_rsp *rsp) 1328 { 1329 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1330 struct rvu_hwinfo *hw = rvu->hw; 1331 u16 pcifunc = req->hdr.pcifunc; 1332 struct rvu_block *block; 1333 struct rvu_pfvf *pfvf; 1334 u64 cfg, ctx_cfg; 1335 int blkaddr; 1336 1337 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1338 return NIX_AF_ERR_PARAM; 1339 1340 if (req->way_mask) 1341 req->way_mask &= 0xFFFF; 1342 1343 pfvf = rvu_get_pfvf(rvu, pcifunc); 1344 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1345 if (!pfvf->nixlf || blkaddr < 0) 1346 return NIX_AF_ERR_AF_LF_INVALID; 1347 1348 block = &hw->block[blkaddr]; 1349 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1350 if (nixlf < 0) 1351 return NIX_AF_ERR_AF_LF_INVALID; 1352 1353 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1354 if (req->npa_func) { 1355 /* If default, use 'this' NIXLF's PFFUNC */ 1356 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1357 req->npa_func = pcifunc; 1358 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1359 return NIX_AF_INVAL_NPA_PF_FUNC; 1360 } 1361 1362 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1363 if (req->sso_func) { 1364 /* If default, use 'this' NIXLF's PFFUNC */ 1365 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1366 req->sso_func = pcifunc; 1367 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1368 return NIX_AF_INVAL_SSO_PF_FUNC; 1369 } 1370 1371 /* If RSS is being enabled, check if requested config is valid. 1372 * RSS table size should be power of two, otherwise 1373 * RSS_GRP::OFFSET + adder might go beyond that group or 1374 * won't be able to use entire table. 
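 * e.g. rss_sz = 256 with rss_grps = 4 places the groups at offsets
 * 0, 256, 512 and 768 of the indirection table (see nixlf_rss_ctx_init()).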
1375 */ 1376 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1377 !is_power_of_2(req->rss_sz))) 1378 return NIX_AF_ERR_RSS_SIZE_INVALID; 1379 1380 if (req->rss_sz && 1381 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1382 return NIX_AF_ERR_RSS_GRPS_INVALID; 1383 1384 /* Reset this NIX LF */ 1385 err = rvu_lf_reset(rvu, block, nixlf); 1386 if (err) { 1387 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1388 block->addr - BLKADDR_NIX0, nixlf); 1389 return NIX_AF_ERR_LF_RESET; 1390 } 1391 1392 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1393 1394 /* Alloc NIX RQ HW context memory and config the base */ 1395 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1396 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1397 if (err) 1398 goto free_mem; 1399 1400 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1401 if (!pfvf->rq_bmap) 1402 goto free_mem; 1403 1404 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1405 (u64)pfvf->rq_ctx->iova); 1406 1407 /* Set caching and queue count in HW */ 1408 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1409 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1410 1411 /* Alloc NIX SQ HW context memory and config the base */ 1412 hwctx_size = 1UL << (ctx_cfg & 0xF); 1413 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1414 if (err) 1415 goto free_mem; 1416 1417 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1418 if (!pfvf->sq_bmap) 1419 goto free_mem; 1420 1421 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1422 (u64)pfvf->sq_ctx->iova); 1423 1424 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1425 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1426 1427 /* Alloc NIX CQ HW context memory and config the base */ 1428 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1429 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1430 if (err) 1431 goto free_mem; 1432 1433 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1434 if (!pfvf->cq_bmap) 1435 goto free_mem; 1436 1437 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1438 (u64)pfvf->cq_ctx->iova); 1439 1440 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1441 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1442 1443 /* Initialize receive side scaling (RSS) */ 1444 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1445 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1446 req->rss_grps, hwctx_size, req->way_mask, 1447 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); 1448 if (err) 1449 goto free_mem; 1450 1451 /* Alloc memory for CQINT's HW contexts */ 1452 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1453 qints = (cfg >> 24) & 0xFFF; 1454 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1455 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1456 if (err) 1457 goto free_mem; 1458 1459 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1460 (u64)pfvf->cq_ints_ctx->iova); 1461 1462 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1463 BIT_ULL(36) | req->way_mask << 20); 1464 1465 /* Alloc memory for QINT's HW contexts */ 1466 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1467 qints = (cfg >> 12) & 0xFFF; 1468 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1469 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1470 if (err) 1471 goto free_mem; 1472 1473 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1474 (u64)pfvf->nix_qints_ctx->iova); 1475 rvu_write64(rvu, 
blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1476 BIT_ULL(36) | req->way_mask << 20); 1477 1478 /* Setup VLANX TPID's. 1479 * Use VLAN1 for 802.1Q 1480 * and VLAN0 for 802.1AD. 1481 */ 1482 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1483 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1484 1485 /* Enable LMTST for this NIX LF */ 1486 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1487 1488 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1489 if (req->npa_func) 1490 cfg = req->npa_func; 1491 if (req->sso_func) 1492 cfg |= (u64)req->sso_func << 16; 1493 1494 cfg |= (u64)req->xqe_sz << 33; 1495 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1496 1497 /* Config Rx pkt length, csum checks and apad enable / disable */ 1498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1499 1500 /* Configure pkind for TX parse config */ 1501 cfg = NPC_TX_DEF_PKIND; 1502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1503 1504 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1505 if (is_sdp_pfvf(pcifunc)) 1506 intf = NIX_INTF_TYPE_SDP; 1507 1508 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, 1509 !!(req->flags & NIX_LF_LBK_BLK_SEL)); 1510 if (err) 1511 goto free_mem; 1512 1513 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1514 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1515 1516 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1517 rvu_write64(rvu, blkaddr, 1518 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1519 VTAGSIZE_T4 | VTAG_STRIP); 1520 1521 goto exit; 1522 1523 free_mem: 1524 nix_ctx_free(rvu, pfvf); 1525 rc = -ENOMEM; 1526 1527 exit: 1528 /* Set macaddr of this PF/VF */ 1529 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1530 1531 /* set SQB size info */ 1532 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1533 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1534 rsp->rx_chan_base = pfvf->rx_chan_base; 1535 rsp->tx_chan_base = pfvf->tx_chan_base; 1536 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1537 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1538 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1539 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1540 /* Get HW supported stat count */ 1541 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1542 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1543 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1544 /* Get count of CQ IRQs and error IRQs supported per LF */ 1545 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1546 rsp->qints = ((cfg >> 12) & 0xFFF); 1547 rsp->cints = ((cfg >> 24) & 0xFFF); 1548 rsp->cgx_links = hw->cgx_links; 1549 rsp->lbk_links = hw->lbk_links; 1550 rsp->sdp_links = hw->sdp_links; 1551 1552 return rc; 1553 } 1554 1555 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1556 struct msg_rsp *rsp) 1557 { 1558 struct rvu_hwinfo *hw = rvu->hw; 1559 u16 pcifunc = req->hdr.pcifunc; 1560 struct rvu_block *block; 1561 int blkaddr, nixlf, err; 1562 struct rvu_pfvf *pfvf; 1563 1564 pfvf = rvu_get_pfvf(rvu, pcifunc); 1565 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1566 if (!pfvf->nixlf || blkaddr < 0) 1567 return NIX_AF_ERR_AF_LF_INVALID; 1568 1569 block = &hw->block[blkaddr]; 1570 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1571 if (nixlf < 0) 1572 return NIX_AF_ERR_AF_LF_INVALID; 1573 1574 if (req->flags & NIX_LF_DISABLE_FLOWS) 1575 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1576 else 1577 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1578 1579 /* Free any tx vtag def entries used by this 
NIX LF */ 1580 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1581 nix_free_tx_vtag_entries(rvu, pcifunc); 1582 1583 nix_interface_deinit(rvu, pcifunc, nixlf); 1584 1585 /* Reset this NIX LF */ 1586 err = rvu_lf_reset(rvu, block, nixlf); 1587 if (err) { 1588 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1589 block->addr - BLKADDR_NIX0, nixlf); 1590 return NIX_AF_ERR_LF_RESET; 1591 } 1592 1593 nix_ctx_free(rvu, pfvf); 1594 1595 return 0; 1596 } 1597 1598 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1599 struct nix_mark_format_cfg *req, 1600 struct nix_mark_format_cfg_rsp *rsp) 1601 { 1602 u16 pcifunc = req->hdr.pcifunc; 1603 struct nix_hw *nix_hw; 1604 struct rvu_pfvf *pfvf; 1605 int blkaddr, rc; 1606 u32 cfg; 1607 1608 pfvf = rvu_get_pfvf(rvu, pcifunc); 1609 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1610 if (!pfvf->nixlf || blkaddr < 0) 1611 return NIX_AF_ERR_AF_LF_INVALID; 1612 1613 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1614 if (!nix_hw) 1615 return NIX_AF_ERR_INVALID_NIXBLK; 1616 1617 cfg = (((u32)req->offset & 0x7) << 16) | 1618 (((u32)req->y_mask & 0xF) << 12) | 1619 (((u32)req->y_val & 0xF) << 8) | 1620 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1621 1622 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1623 if (rc < 0) { 1624 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1625 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1626 return NIX_AF_ERR_MARK_CFG_FAIL; 1627 } 1628 1629 rsp->mark_format_idx = rc; 1630 return 0; 1631 } 1632 1633 /* Handle shaper update specially for few revisions */ 1634 static bool 1635 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, 1636 int lvl, u64 reg, u64 regval) 1637 { 1638 u64 regbase, oldval, sw_xoff = 0; 1639 u64 dbgval, md_debug0 = 0; 1640 unsigned long poll_tmo; 1641 bool rate_reg = 0; 1642 u32 schq; 1643 1644 regbase = reg & 0xFFFF; 1645 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1646 1647 /* Check for rate register */ 1648 switch (lvl) { 1649 case NIX_TXSCH_LVL_TL1: 1650 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); 1651 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); 1652 1653 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); 1654 break; 1655 case NIX_TXSCH_LVL_TL2: 1656 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); 1657 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); 1658 1659 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || 1660 regbase == NIX_AF_TL2X_PIR(0)); 1661 break; 1662 case NIX_TXSCH_LVL_TL3: 1663 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); 1664 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); 1665 1666 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || 1667 regbase == NIX_AF_TL3X_PIR(0)); 1668 break; 1669 case NIX_TXSCH_LVL_TL4: 1670 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); 1671 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); 1672 1673 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || 1674 regbase == NIX_AF_TL4X_PIR(0)); 1675 break; 1676 case NIX_TXSCH_LVL_MDQ: 1677 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); 1678 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || 1679 regbase == NIX_AF_MDQX_PIR(0)); 1680 break; 1681 } 1682 1683 if (!rate_reg) 1684 return false; 1685 1686 /* Nothing special to do when state is not toggled */ 1687 oldval = rvu_read64(rvu, blkaddr, reg); 1688 if ((oldval & 0x1) == (regval & 0x1)) { 1689 rvu_write64(rvu, blkaddr, reg, regval); 1690 return true; 1691 } 1692 1693 /* PIR/CIR disable */ 1694 if (!(regval & 0x1)) { 1695 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1696 rvu_write64(rvu, blkaddr, reg, 0); 1697 udelay(4); 1698 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1699 return true; 1700 } 1701 1702 /* PIR/CIR 
enable */ 1703 rvu_write64(rvu, blkaddr, sw_xoff, 1); 1704 if (md_debug0) { 1705 poll_tmo = jiffies + usecs_to_jiffies(10000); 1706 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ 1707 do { 1708 if (time_after(jiffies, poll_tmo)) { 1709 dev_err(rvu->dev, 1710 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", 1711 nixlf, schq, lvl); 1712 goto exit; 1713 } 1714 usleep_range(1, 5); 1715 dbgval = rvu_read64(rvu, blkaddr, md_debug0); 1716 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); 1717 } 1718 rvu_write64(rvu, blkaddr, reg, regval); 1719 exit: 1720 rvu_write64(rvu, blkaddr, sw_xoff, 0); 1721 return true; 1722 } 1723 1724 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr, 1725 int lvl, int schq) 1726 { 1727 u64 tlx_parent = 0, tlx_schedule = 0; 1728 1729 switch (lvl) { 1730 case NIX_TXSCH_LVL_TL2: 1731 tlx_parent = NIX_AF_TL2X_PARENT(schq); 1732 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); 1733 break; 1734 case NIX_TXSCH_LVL_TL3: 1735 tlx_parent = NIX_AF_TL3X_PARENT(schq); 1736 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); 1737 break; 1738 case NIX_TXSCH_LVL_TL4: 1739 tlx_parent = NIX_AF_TL4X_PARENT(schq); 1740 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); 1741 break; 1742 case NIX_TXSCH_LVL_MDQ: 1743 /* no need to reset SMQ_CFG as HW clears this CSR 1744 * on SMQ flush 1745 */ 1746 tlx_parent = NIX_AF_MDQX_PARENT(schq); 1747 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); 1748 break; 1749 default: 1750 return; 1751 } 1752 1753 if (tlx_parent) 1754 rvu_write64(rvu, blkaddr, tlx_parent, 0x0); 1755 1756 if (tlx_schedule) 1757 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); 1758 } 1759 1760 /* Disable shaping of pkts by a scheduler queue 1761 * at a given scheduler level. 1762 */ 1763 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1764 int nixlf, int lvl, int schq) 1765 { 1766 struct rvu_hwinfo *hw = rvu->hw; 1767 u64 cir_reg = 0, pir_reg = 0; 1768 u64 cfg; 1769 1770 switch (lvl) { 1771 case NIX_TXSCH_LVL_TL1: 1772 cir_reg = NIX_AF_TL1X_CIR(schq); 1773 pir_reg = 0; /* PIR not available at TL1 */ 1774 break; 1775 case NIX_TXSCH_LVL_TL2: 1776 cir_reg = NIX_AF_TL2X_CIR(schq); 1777 pir_reg = NIX_AF_TL2X_PIR(schq); 1778 break; 1779 case NIX_TXSCH_LVL_TL3: 1780 cir_reg = NIX_AF_TL3X_CIR(schq); 1781 pir_reg = NIX_AF_TL3X_PIR(schq); 1782 break; 1783 case NIX_TXSCH_LVL_TL4: 1784 cir_reg = NIX_AF_TL4X_CIR(schq); 1785 pir_reg = NIX_AF_TL4X_PIR(schq); 1786 break; 1787 case NIX_TXSCH_LVL_MDQ: 1788 cir_reg = NIX_AF_MDQX_CIR(schq); 1789 pir_reg = NIX_AF_MDQX_PIR(schq); 1790 break; 1791 } 1792 1793 /* Shaper state toggle needs wait/poll */ 1794 if (hw->cap.nix_shaper_toggle_wait) { 1795 if (cir_reg) 1796 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1797 lvl, cir_reg, 0); 1798 if (pir_reg) 1799 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 1800 lvl, pir_reg, 0); 1801 return; 1802 } 1803 1804 if (!cir_reg) 1805 return; 1806 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1807 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1808 1809 if (!pir_reg) 1810 return; 1811 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1812 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1813 } 1814 1815 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1816 int lvl, int schq) 1817 { 1818 struct rvu_hwinfo *hw = rvu->hw; 1819 int link_level; 1820 int link; 1821 1822 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1823 return; 1824 1825 /* Reset TL4's SDP link config */ 1826 if (lvl == NIX_TXSCH_LVL_TL4) 1827 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1828 1829 link_level = 
rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1830 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1831 if (lvl != link_level) 1832 return; 1833 1834 /* Reset TL2's CGX or LBK link config */ 1835 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1836 rvu_write64(rvu, blkaddr, 1837 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1838 } 1839 1840 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, 1841 int lvl, int schq) 1842 { 1843 struct rvu_hwinfo *hw = rvu->hw; 1844 u64 reg; 1845 1846 /* Skip this if shaping is not supported */ 1847 if (!hw->cap.nix_shaping) 1848 return; 1849 1850 /* Clear level specific SW_XOFF */ 1851 switch (lvl) { 1852 case NIX_TXSCH_LVL_TL1: 1853 reg = NIX_AF_TL1X_SW_XOFF(schq); 1854 break; 1855 case NIX_TXSCH_LVL_TL2: 1856 reg = NIX_AF_TL2X_SW_XOFF(schq); 1857 break; 1858 case NIX_TXSCH_LVL_TL3: 1859 reg = NIX_AF_TL3X_SW_XOFF(schq); 1860 break; 1861 case NIX_TXSCH_LVL_TL4: 1862 reg = NIX_AF_TL4X_SW_XOFF(schq); 1863 break; 1864 case NIX_TXSCH_LVL_MDQ: 1865 reg = NIX_AF_MDQX_SW_XOFF(schq); 1866 break; 1867 default: 1868 return; 1869 } 1870 1871 rvu_write64(rvu, blkaddr, reg, 0x0); 1872 } 1873 1874 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1875 { 1876 struct rvu_hwinfo *hw = rvu->hw; 1877 int pf = rvu_get_pf(pcifunc); 1878 u8 cgx_id = 0, lmac_id = 0; 1879 1880 if (is_afvf(pcifunc)) {/* LBK links */ 1881 return hw->cgx_links; 1882 } else if (is_pf_cgxmapped(rvu, pf)) { 1883 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1884 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1885 } 1886 1887 /* SDP link */ 1888 return hw->cgx_links + hw->lbk_links; 1889 } 1890 1891 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1892 int link, int *start, int *end) 1893 { 1894 struct rvu_hwinfo *hw = rvu->hw; 1895 int pf = rvu_get_pf(pcifunc); 1896 1897 if (is_afvf(pcifunc)) { /* LBK links */ 1898 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1899 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1900 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1901 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1902 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1903 } else { /* SDP link */ 1904 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1905 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1906 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1907 } 1908 } 1909 1910 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1911 struct nix_hw *nix_hw, 1912 struct nix_txsch_alloc_req *req) 1913 { 1914 struct rvu_hwinfo *hw = rvu->hw; 1915 int schq, req_schq, free_cnt; 1916 struct nix_txsch *txsch; 1917 int link, start, end; 1918 1919 txsch = &nix_hw->txsch[lvl]; 1920 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1921 1922 if (!req_schq) 1923 return 0; 1924 1925 link = nix_get_tx_link(rvu, pcifunc); 1926 1927 /* For traffic aggregating scheduler level, one queue is enough */ 1928 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1929 if (req_schq != 1) 1930 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1931 return 0; 1932 } 1933 1934 /* Get free SCHQ count and check if request can be accomodated */ 1935 if (hw->cap.nix_fixed_txschq_mapping) { 1936 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1937 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1938 if (end <= txsch->schq.max && schq < end && 1939 !test_bit(schq, txsch->schq.bmap)) 1940 free_cnt = 1; 1941 else 1942 free_cnt = 0; 1943 } else { 1944 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1945 } 1946 1947 if (free_cnt < req_schq || 
req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || 1948 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) 1949 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1950 1951 /* If contiguous queues are needed, check for availability */ 1952 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1953 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1954 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1955 1956 return 0; 1957 } 1958 1959 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1960 struct nix_txsch_alloc_rsp *rsp, 1961 int lvl, int start, int end) 1962 { 1963 struct rvu_hwinfo *hw = rvu->hw; 1964 u16 pcifunc = rsp->hdr.pcifunc; 1965 int idx, schq; 1966 1967 /* For traffic aggregating levels, queue alloc is based 1968 * on transmit link to which PF_FUNC is mapped to. 1969 */ 1970 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1971 /* A single TL queue is allocated */ 1972 if (rsp->schq_contig[lvl]) { 1973 rsp->schq_contig[lvl] = 1; 1974 rsp->schq_contig_list[lvl][0] = start; 1975 } 1976 1977 /* Both contig and non-contig reqs doesn't make sense here */ 1978 if (rsp->schq_contig[lvl]) 1979 rsp->schq[lvl] = 0; 1980 1981 if (rsp->schq[lvl]) { 1982 rsp->schq[lvl] = 1; 1983 rsp->schq_list[lvl][0] = start; 1984 } 1985 return; 1986 } 1987 1988 /* Adjust the queue request count if HW supports 1989 * only one queue per level configuration. 1990 */ 1991 if (hw->cap.nix_fixed_txschq_mapping) { 1992 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1993 schq = start + idx; 1994 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1995 rsp->schq_contig[lvl] = 0; 1996 rsp->schq[lvl] = 0; 1997 return; 1998 } 1999 2000 if (rsp->schq_contig[lvl]) { 2001 rsp->schq_contig[lvl] = 1; 2002 set_bit(schq, txsch->schq.bmap); 2003 rsp->schq_contig_list[lvl][0] = schq; 2004 rsp->schq[lvl] = 0; 2005 } else if (rsp->schq[lvl]) { 2006 rsp->schq[lvl] = 1; 2007 set_bit(schq, txsch->schq.bmap); 2008 rsp->schq_list[lvl][0] = schq; 2009 } 2010 return; 2011 } 2012 2013 /* Allocate contiguous queue indices requesty first */ 2014 if (rsp->schq_contig[lvl]) { 2015 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 2016 txsch->schq.max, start, 2017 rsp->schq_contig[lvl], 0); 2018 if (schq >= end) 2019 rsp->schq_contig[lvl] = 0; 2020 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 2021 set_bit(schq, txsch->schq.bmap); 2022 rsp->schq_contig_list[lvl][idx] = schq; 2023 schq++; 2024 } 2025 } 2026 2027 /* Allocate non-contiguous queue indices */ 2028 if (rsp->schq[lvl]) { 2029 idx = 0; 2030 for (schq = start; schq < end; schq++) { 2031 if (!test_bit(schq, txsch->schq.bmap)) { 2032 set_bit(schq, txsch->schq.bmap); 2033 rsp->schq_list[lvl][idx++] = schq; 2034 } 2035 if (idx == rsp->schq[lvl]) 2036 break; 2037 } 2038 /* Update how many were allocated */ 2039 rsp->schq[lvl] = idx; 2040 } 2041 } 2042 2043 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 2044 struct nix_txsch_alloc_req *req, 2045 struct nix_txsch_alloc_rsp *rsp) 2046 { 2047 struct rvu_hwinfo *hw = rvu->hw; 2048 u16 pcifunc = req->hdr.pcifunc; 2049 int link, blkaddr, rc = 0; 2050 int lvl, idx, start, end; 2051 struct nix_txsch *txsch; 2052 struct nix_hw *nix_hw; 2053 u32 *pfvf_map; 2054 int nixlf; 2055 u16 schq; 2056 2057 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2058 if (rc) 2059 return rc; 2060 2061 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2062 if (!nix_hw) 2063 return NIX_AF_ERR_INVALID_NIXBLK; 2064 2065 mutex_lock(&rvu->rsrc_lock); 2066 2067 /* Check if request is valid as per HW capabilities 2068 * and can be accomodated. 
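 *
 * Illustrative example (values are only an example): a request with
 * schq[NIX_TXSCH_LVL_TL4] = 2 and schq_contig[NIX_TXSCH_LVL_TL4] = 1
 * needs three free TL4 queues, both counts must not exceed
 * MAX_TXSCHQ_PER_FUNC and, when the silicon does not use the fixed
 * txschq mapping, the contiguous part must also fit in one unbroken
 * run of the TL4 bitmap.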
2069 */ 2070 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2071 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2072 if (rc) 2073 goto err; 2074 } 2075 2076 /* Allocate requested Tx scheduler queues */ 2077 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2078 txsch = &nix_hw->txsch[lvl]; 2079 pfvf_map = txsch->pfvf_map; 2080 2081 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2082 continue; 2083 2084 rsp->schq[lvl] = req->schq[lvl]; 2085 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2086 2087 link = nix_get_tx_link(rvu, pcifunc); 2088 2089 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2090 start = link; 2091 end = link; 2092 } else if (hw->cap.nix_fixed_txschq_mapping) { 2093 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2094 } else { 2095 start = 0; 2096 end = txsch->schq.max; 2097 } 2098 2099 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2100 2101 /* Reset queue config */ 2102 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2103 schq = rsp->schq_contig_list[lvl][idx]; 2104 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2105 NIX_TXSCHQ_CFG_DONE)) 2106 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2107 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2108 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2109 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2110 } 2111 2112 for (idx = 0; idx < req->schq[lvl]; idx++) { 2113 schq = rsp->schq_list[lvl][idx]; 2114 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2115 NIX_TXSCHQ_CFG_DONE)) 2116 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2117 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2118 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2119 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2120 } 2121 } 2122 2123 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2124 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2125 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2126 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2127 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2128 goto exit; 2129 err: 2130 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2131 exit: 2132 mutex_unlock(&rvu->rsrc_lock); 2133 return rc; 2134 } 2135 2136 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2137 struct nix_smq_flush_ctx *smq_flush_ctx) 2138 { 2139 struct nix_smq_tree_ctx *smq_tree_ctx; 2140 u64 parent_off, regval; 2141 u16 schq; 2142 int lvl; 2143 2144 smq_flush_ctx->smq = smq; 2145 2146 schq = smq; 2147 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2148 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2149 if (lvl == NIX_TXSCH_LVL_TL1) { 2150 smq_flush_ctx->tl1_schq = schq; 2151 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2152 smq_tree_ctx->pir_off = 0; 2153 smq_tree_ctx->pir_val = 0; 2154 parent_off = 0; 2155 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2156 smq_flush_ctx->tl2_schq = schq; 2157 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2158 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2159 parent_off = NIX_AF_TL2X_PARENT(schq); 2160 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2161 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2162 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2163 parent_off = NIX_AF_TL3X_PARENT(schq); 2164 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2165 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2166 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2167 parent_off = NIX_AF_TL4X_PARENT(schq); 2168 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2169 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2170 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2171 parent_off = NIX_AF_MDQX_PARENT(schq); 2172 } 2173 /* save cir/pir register values */ 2174 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2175 if (smq_tree_ctx->pir_off) 2176 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2177 2178 /* get parent txsch node */ 2179 if (parent_off) { 2180 regval = rvu_read64(rvu, blkaddr, parent_off); 2181 schq = (regval >> 16) & 0x1FF; 2182 } 2183 } 2184 } 2185 2186 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2187 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2188 { 2189 struct nix_txsch *txsch; 2190 struct nix_hw *nix_hw; 2191 u64 regoff; 2192 int tl2; 2193 2194 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2195 if (!nix_hw) 2196 return; 2197 2198 /* loop through all TL2s with matching PF_FUNC */ 2199 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2200 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2201 /* skip the smq(flush) TL2 */ 2202 if (tl2 == smq_flush_ctx->tl2_schq) 2203 continue; 2204 /* skip unused TL2s */ 2205 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2206 continue; 2207 /* skip if PF_FUNC doesn't match */ 2208 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2209 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & 2210 ~RVU_PFVF_FUNC_MASK))) 2211 continue; 2212 /* enable/disable XOFF */ 2213 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2214 if (enable) 2215 rvu_write64(rvu, blkaddr, regoff, 0x1); 2216 else 2217 rvu_write64(rvu, blkaddr, regoff, 0x0); 2218 } 2219 } 2220 2221 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2222 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2223 { 2224 u64 cir_off, pir_off, cir_val, pir_val; 2225 struct nix_smq_tree_ctx *smq_tree_ctx; 2226 int lvl; 2227 2228 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2229 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2230 cir_off = smq_tree_ctx->cir_off; 2231 cir_val = 
smq_tree_ctx->cir_val; 2232 pir_off = smq_tree_ctx->pir_off; 2233 pir_val = smq_tree_ctx->pir_val; 2234 2235 if (enable) { 2236 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2237 if (lvl != NIX_TXSCH_LVL_TL1) 2238 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2239 } else { 2240 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2241 if (lvl != NIX_TXSCH_LVL_TL1) 2242 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2243 } 2244 } 2245 } 2246 2247 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2248 int smq, u16 pcifunc, int nixlf) 2249 { 2250 struct nix_smq_flush_ctx *smq_flush_ctx; 2251 int pf = rvu_get_pf(pcifunc); 2252 u8 cgx_id = 0, lmac_id = 0; 2253 int err, restore_tx_en = 0; 2254 u64 cfg; 2255 2256 if (!is_rvu_otx2(rvu)) { 2257 /* Skip SMQ flush if pkt count is zero */ 2258 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2259 if (!cfg) 2260 return 0; 2261 } 2262 2263 /* enable cgx tx if disabled */ 2264 if (is_pf_cgxmapped(rvu, pf)) { 2265 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2266 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2267 lmac_id, true); 2268 } 2269 2270 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2271 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2272 if (!smq_flush_ctx) 2273 return -ENOMEM; 2274 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2275 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2276 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2277 2278 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2279 /* Do SMQ flush and set enqueue xoff */ 2280 cfg |= BIT_ULL(50) | BIT_ULL(49); 2281 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2282 2283 /* Disable backpressure from physical link, 2284 * otherwise SMQ flush may stall. 
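 *
 * The flush below drains packets already queued on this SMQ out on the
 * transmit link; if the CGX/RPM RX side keeps asserting backpressure,
 * that drain may never finish and the poll on BIT(49) of
 * NIX_AF_SMQX_CFG below times out with the "txlink might be busy"
 * message.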
2285 */ 2286 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2287 2288 /* Wait for flush to complete */ 2289 err = rvu_poll_reg(rvu, blkaddr, 2290 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2291 if (err) 2292 dev_info(rvu->dev, 2293 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2294 nixlf, smq); 2295 2296 /* clear XOFF on TL2s */ 2297 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2298 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2299 kfree(smq_flush_ctx); 2300 2301 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2302 /* restore cgx tx state */ 2303 if (restore_tx_en) 2304 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2305 return err; 2306 } 2307 2308 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2309 { 2310 int blkaddr, nixlf, lvl, schq, err; 2311 struct rvu_hwinfo *hw = rvu->hw; 2312 struct nix_txsch *txsch; 2313 struct nix_hw *nix_hw; 2314 u16 map_func; 2315 2316 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2317 if (blkaddr < 0) 2318 return NIX_AF_ERR_AF_LF_INVALID; 2319 2320 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2321 if (!nix_hw) 2322 return NIX_AF_ERR_INVALID_NIXBLK; 2323 2324 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2325 if (nixlf < 0) 2326 return NIX_AF_ERR_AF_LF_INVALID; 2327 2328 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2329 mutex_lock(&rvu->rsrc_lock); 2330 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2331 txsch = &nix_hw->txsch[lvl]; 2332 2333 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2334 continue; 2335 2336 for (schq = 0; schq < txsch->schq.max; schq++) { 2337 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2338 continue; 2339 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2340 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2341 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2342 } 2343 } 2344 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2345 nix_get_tx_link(rvu, pcifunc)); 2346 2347 /* On PF cleanup, clear cfg done flag as 2348 * PF would have changed default config. 2349 */ 2350 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2351 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2352 schq = nix_get_tx_link(rvu, pcifunc); 2353 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2354 * VF might be using this TL1 queue 2355 */ 2356 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2357 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2358 } 2359 2360 /* Flush SMQs */ 2361 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2362 for (schq = 0; schq < txsch->schq.max; schq++) { 2363 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2364 continue; 2365 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2366 } 2367 2368 /* Now free scheduler queues to free pool */ 2369 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2370 /* TLs above aggregation level are shared across all PF 2371 * and it's VFs, hence skip freeing them. 
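 *
 * The aggregation level is typically TL1: the PF and all of its VFs
 * transmit through the same TL1 queue per link, so only the
 * config-done flag was cleared for it in the PF cleanup block above
 * instead of returning the queue to the free pool.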
2372 */ 2373 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2374 continue; 2375 2376 txsch = &nix_hw->txsch[lvl]; 2377 for (schq = 0; schq < txsch->schq.max; schq++) { 2378 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2379 continue; 2380 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2381 rvu_free_rsrc(&txsch->schq, schq); 2382 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2383 } 2384 } 2385 mutex_unlock(&rvu->rsrc_lock); 2386 2387 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 2388 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 2389 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 2390 if (err) 2391 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2392 2393 return 0; 2394 } 2395 2396 static int nix_txschq_free_one(struct rvu *rvu, 2397 struct nix_txsch_free_req *req) 2398 { 2399 struct rvu_hwinfo *hw = rvu->hw; 2400 u16 pcifunc = req->hdr.pcifunc; 2401 int lvl, schq, nixlf, blkaddr; 2402 struct nix_txsch *txsch; 2403 struct nix_hw *nix_hw; 2404 u32 *pfvf_map; 2405 int rc; 2406 2407 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2408 if (blkaddr < 0) 2409 return NIX_AF_ERR_AF_LF_INVALID; 2410 2411 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2412 if (!nix_hw) 2413 return NIX_AF_ERR_INVALID_NIXBLK; 2414 2415 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2416 if (nixlf < 0) 2417 return NIX_AF_ERR_AF_LF_INVALID; 2418 2419 lvl = req->schq_lvl; 2420 schq = req->schq; 2421 txsch = &nix_hw->txsch[lvl]; 2422 2423 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2424 return 0; 2425 2426 pfvf_map = txsch->pfvf_map; 2427 mutex_lock(&rvu->rsrc_lock); 2428 2429 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2430 rc = NIX_AF_ERR_TLX_INVALID; 2431 goto err; 2432 } 2433 2434 /* Clear SW_XOFF of this resource only. 2435 * For SMQ level, all path XOFF's 2436 * need to be made clear by user 2437 */ 2438 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2439 2440 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2441 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2442 2443 /* Flush if it is a SMQ. 
Onus of disabling 2444 * TL2/3 queue links before SMQ flush is on user 2445 */ 2446 if (lvl == NIX_TXSCH_LVL_SMQ && 2447 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2448 rc = NIX_AF_SMQ_FLUSH_FAILED; 2449 goto err; 2450 } 2451 2452 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2453 2454 /* Free the resource */ 2455 rvu_free_rsrc(&txsch->schq, schq); 2456 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2457 mutex_unlock(&rvu->rsrc_lock); 2458 return 0; 2459 err: 2460 mutex_unlock(&rvu->rsrc_lock); 2461 return rc; 2462 } 2463 2464 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2465 struct nix_txsch_free_req *req, 2466 struct msg_rsp *rsp) 2467 { 2468 if (req->flags & TXSCHQ_FREE_ALL) 2469 return nix_txschq_free(rvu, req->hdr.pcifunc); 2470 else 2471 return nix_txschq_free_one(rvu, req); 2472 } 2473 2474 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2475 int lvl, u64 reg, u64 regval) 2476 { 2477 u64 regbase = reg & 0xFFFF; 2478 u16 schq, parent; 2479 2480 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2481 return false; 2482 2483 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2484 /* Check if this schq belongs to this PF/VF or not */ 2485 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2486 return false; 2487 2488 parent = (regval >> 16) & 0x1FF; 2489 /* Validate MDQ's TL4 parent */ 2490 if (regbase == NIX_AF_MDQX_PARENT(0) && 2491 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2492 return false; 2493 2494 /* Validate TL4's TL3 parent */ 2495 if (regbase == NIX_AF_TL4X_PARENT(0) && 2496 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2497 return false; 2498 2499 /* Validate TL3's TL2 parent */ 2500 if (regbase == NIX_AF_TL3X_PARENT(0) && 2501 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2502 return false; 2503 2504 /* Validate TL2's TL1 parent */ 2505 if (regbase == NIX_AF_TL2X_PARENT(0) && 2506 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2507 return false; 2508 2509 return true; 2510 } 2511 2512 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2513 { 2514 u64 regbase; 2515 2516 if (hw->cap.nix_shaping) 2517 return true; 2518 2519 /* If shaping and coloring is not supported, then 2520 * *_CIR and *_PIR registers should not be configured. 
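 *
 * In the txschq config handler this check is used to silently skip
 * such writes rather than fail the request, so mailbox users that
 * unconditionally program CIR/PIR still work on non-shaping silicon.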
 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}

static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes and on newer silicons
	 * it's changed to weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)

static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}

void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
			struct nix_txsch *txsch, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;
	u64 cfg;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	cfg = enable ?
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2621 lbk_link_start = hw->cgx_links; 2622 2623 for (schq = 0; schq < txsch->schq.max; schq++) { 2624 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2625 continue; 2626 /* Enable all LBK links with channel 63 by default so that 2627 * packets can be sent to LBK with a NPC TX MCAM rule 2628 */ 2629 lbk_links = hw->lbk_links; 2630 while (lbk_links--) 2631 rvu_write64(rvu, blkaddr, 2632 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2633 lbk_link_start + 2634 lbk_links), cfg); 2635 } 2636 } 2637 2638 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2639 struct nix_txschq_config *req, 2640 struct nix_txschq_config *rsp) 2641 { 2642 u64 reg, val, regval, schq_regbase, val_mask; 2643 struct rvu_hwinfo *hw = rvu->hw; 2644 u16 pcifunc = req->hdr.pcifunc; 2645 struct nix_txsch *txsch; 2646 struct nix_hw *nix_hw; 2647 int blkaddr, idx, err; 2648 int nixlf, schq; 2649 u32 *pfvf_map; 2650 2651 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2652 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2653 return NIX_AF_INVAL_TXSCHQ_CFG; 2654 2655 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2656 if (err) 2657 return err; 2658 2659 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2660 if (!nix_hw) 2661 return NIX_AF_ERR_INVALID_NIXBLK; 2662 2663 if (req->read) 2664 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2665 2666 txsch = &nix_hw->txsch[req->lvl]; 2667 pfvf_map = txsch->pfvf_map; 2668 2669 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2670 pcifunc & RVU_PFVF_FUNC_MASK) { 2671 mutex_lock(&rvu->rsrc_lock); 2672 if (req->lvl == NIX_TXSCH_LVL_TL1) 2673 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2674 mutex_unlock(&rvu->rsrc_lock); 2675 return 0; 2676 } 2677 2678 for (idx = 0; idx < req->num_regs; idx++) { 2679 reg = req->reg[idx]; 2680 reg &= NIX_TX_SCHQ_MASK; 2681 regval = req->regval[idx]; 2682 schq_regbase = reg & 0xFFFF; 2683 val_mask = req->regval_mask[idx]; 2684 2685 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2686 txsch->lvl, reg, regval)) 2687 return NIX_AF_INVAL_TXSCHQ_CFG; 2688 2689 /* Check if shaping and coloring is supported */ 2690 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2691 continue; 2692 2693 val = rvu_read64(rvu, blkaddr, reg); 2694 regval = (val & val_mask) | (regval & ~val_mask); 2695 2696 /* Handle shaping state toggle specially */ 2697 if (hw->cap.nix_shaper_toggle_wait && 2698 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2699 req->lvl, reg, regval)) 2700 continue; 2701 2702 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2703 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2704 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2705 pcifunc, 0); 2706 regval &= ~(0x7FULL << 24); 2707 regval |= ((u64)nixlf << 24); 2708 } 2709 2710 /* Clear 'BP_ENA' config, if it's not allowed */ 2711 if (!hw->cap.nix_tx_link_bp) { 2712 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2713 (schq_regbase & 0xFF00) == 2714 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2715 regval &= ~BIT_ULL(13); 2716 } 2717 2718 /* Mark config as done for TL1 by PF */ 2719 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2720 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2721 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2722 mutex_lock(&rvu->rsrc_lock); 2723 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2724 NIX_TXSCHQ_CFG_DONE); 2725 mutex_unlock(&rvu->rsrc_lock); 2726 } 2727 2728 /* SMQ flush is special hence split register writes such 2729 * that flush first and write rest of the bits later. 
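 *
 * i.e. if the requested NIX_AF_SMQX_CFG value has the flush bit
 * (bit 49) set, the AF performs the flush via nix_smq_flush() first
 * and then writes the remaining bits with bit 49 cleared.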
2730 */ 2731 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2732 (regval & BIT_ULL(49))) { 2733 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2734 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2735 regval &= ~BIT_ULL(49); 2736 } 2737 rvu_write64(rvu, blkaddr, reg, regval); 2738 } 2739 2740 return 0; 2741 } 2742 2743 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2744 struct nix_vtag_config *req) 2745 { 2746 u64 regval = req->vtag_size; 2747 2748 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2749 req->vtag_size > VTAGSIZE_T8) 2750 return -EINVAL; 2751 2752 /* RX VTAG Type 7 reserved for vf vlan */ 2753 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2754 return NIX_AF_ERR_RX_VTAG_INUSE; 2755 2756 if (req->rx.capture_vtag) 2757 regval |= BIT_ULL(5); 2758 if (req->rx.strip_vtag) 2759 regval |= BIT_ULL(4); 2760 2761 rvu_write64(rvu, blkaddr, 2762 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2763 return 0; 2764 } 2765 2766 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2767 u16 pcifunc, int index) 2768 { 2769 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2770 struct nix_txvlan *vlan; 2771 2772 if (!nix_hw) 2773 return NIX_AF_ERR_INVALID_NIXBLK; 2774 2775 vlan = &nix_hw->txvlan; 2776 if (vlan->entry2pfvf_map[index] != pcifunc) 2777 return NIX_AF_ERR_PARAM; 2778 2779 rvu_write64(rvu, blkaddr, 2780 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2781 rvu_write64(rvu, blkaddr, 2782 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2783 2784 vlan->entry2pfvf_map[index] = 0; 2785 rvu_free_rsrc(&vlan->rsrc, index); 2786 2787 return 0; 2788 } 2789 2790 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2791 { 2792 struct nix_txvlan *vlan; 2793 struct nix_hw *nix_hw; 2794 int index, blkaddr; 2795 2796 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2797 if (blkaddr < 0) 2798 return; 2799 2800 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2801 if (!nix_hw) 2802 return; 2803 2804 vlan = &nix_hw->txvlan; 2805 2806 mutex_lock(&vlan->rsrc_lock); 2807 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2808 for (index = 0; index < vlan->rsrc.max; index++) { 2809 if (vlan->entry2pfvf_map[index] == pcifunc) 2810 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2811 } 2812 mutex_unlock(&vlan->rsrc_lock); 2813 } 2814 2815 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2816 u64 vtag, u8 size) 2817 { 2818 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2819 struct nix_txvlan *vlan; 2820 u64 regval; 2821 int index; 2822 2823 if (!nix_hw) 2824 return NIX_AF_ERR_INVALID_NIXBLK; 2825 2826 vlan = &nix_hw->txvlan; 2827 2828 mutex_lock(&vlan->rsrc_lock); 2829 2830 index = rvu_alloc_rsrc(&vlan->rsrc); 2831 if (index < 0) { 2832 mutex_unlock(&vlan->rsrc_lock); 2833 return index; 2834 } 2835 2836 mutex_unlock(&vlan->rsrc_lock); 2837 2838 regval = size ? 
vtag : vtag << 32; 2839 2840 rvu_write64(rvu, blkaddr, 2841 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2842 rvu_write64(rvu, blkaddr, 2843 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2844 2845 return index; 2846 } 2847 2848 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2849 struct nix_vtag_config *req) 2850 { 2851 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2852 u16 pcifunc = req->hdr.pcifunc; 2853 int idx0 = req->tx.vtag0_idx; 2854 int idx1 = req->tx.vtag1_idx; 2855 struct nix_txvlan *vlan; 2856 int err = 0; 2857 2858 if (!nix_hw) 2859 return NIX_AF_ERR_INVALID_NIXBLK; 2860 2861 vlan = &nix_hw->txvlan; 2862 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2863 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2864 vlan->entry2pfvf_map[idx1] != pcifunc) 2865 return NIX_AF_ERR_PARAM; 2866 2867 mutex_lock(&vlan->rsrc_lock); 2868 2869 if (req->tx.free_vtag0) { 2870 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2871 if (err) 2872 goto exit; 2873 } 2874 2875 if (req->tx.free_vtag1) 2876 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2877 2878 exit: 2879 mutex_unlock(&vlan->rsrc_lock); 2880 return err; 2881 } 2882 2883 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2884 struct nix_vtag_config *req, 2885 struct nix_vtag_config_rsp *rsp) 2886 { 2887 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2888 struct nix_txvlan *vlan; 2889 u16 pcifunc = req->hdr.pcifunc; 2890 2891 if (!nix_hw) 2892 return NIX_AF_ERR_INVALID_NIXBLK; 2893 2894 vlan = &nix_hw->txvlan; 2895 if (req->tx.cfg_vtag0) { 2896 rsp->vtag0_idx = 2897 nix_tx_vtag_alloc(rvu, blkaddr, 2898 req->tx.vtag0, req->vtag_size); 2899 2900 if (rsp->vtag0_idx < 0) 2901 return NIX_AF_ERR_TX_VTAG_NOSPC; 2902 2903 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2904 } 2905 2906 if (req->tx.cfg_vtag1) { 2907 rsp->vtag1_idx = 2908 nix_tx_vtag_alloc(rvu, blkaddr, 2909 req->tx.vtag1, req->vtag_size); 2910 2911 if (rsp->vtag1_idx < 0) 2912 goto err_free; 2913 2914 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2915 } 2916 2917 return 0; 2918 2919 err_free: 2920 if (req->tx.cfg_vtag0) 2921 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2922 2923 return NIX_AF_ERR_TX_VTAG_NOSPC; 2924 } 2925 2926 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2927 struct nix_vtag_config *req, 2928 struct nix_vtag_config_rsp *rsp) 2929 { 2930 u16 pcifunc = req->hdr.pcifunc; 2931 int blkaddr, nixlf, err; 2932 2933 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2934 if (err) 2935 return err; 2936 2937 if (req->cfg_type) { 2938 /* rx vtag configuration */ 2939 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2940 if (err) 2941 return NIX_AF_ERR_PARAM; 2942 } else { 2943 /* tx vtag configuration */ 2944 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2945 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2946 return NIX_AF_ERR_PARAM; 2947 2948 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2949 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2950 2951 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2952 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2953 } 2954 2955 return 0; 2956 } 2957 2958 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2959 int mce, u8 op, u16 pcifunc, int next, bool eol) 2960 { 2961 struct nix_aq_enq_req aq_req; 2962 int err; 2963 2964 aq_req.hdr.pcifunc = 0; 2965 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2966 aq_req.op = op; 2967 aq_req.qidx = mce; 2968 2969 /* Use RSS with RSS index 0 */ 2970 aq_req.mce.op = 1; 2971 aq_req.mce.index = 0; 2972 aq_req.mce.eol = eol; 2973 aq_req.mce.pf_func = pcifunc; 
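	/* MCE entries effectively form a linked list in hardware: 'next'
	 * points to the next MCE index and 'eol' marks the last entry. For
	 * example (illustrative), a broadcast list for a PF with two VFs is
	 * allocated three consecutive MCE slots; entries in use each point
	 * to the following index, with eol set only on the last one.
	 */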
2974 aq_req.mce.next = next; 2975 2976 /* All fields valid */ 2977 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2978 2979 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2980 if (err) { 2981 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2982 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2983 return err; 2984 } 2985 return 0; 2986 } 2987 2988 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2989 u16 pcifunc, bool add) 2990 { 2991 struct mce *mce, *tail = NULL; 2992 bool delete = false; 2993 2994 /* Scan through the current list */ 2995 hlist_for_each_entry(mce, &mce_list->head, node) { 2996 /* If already exists, then delete */ 2997 if (mce->pcifunc == pcifunc && !add) { 2998 delete = true; 2999 break; 3000 } else if (mce->pcifunc == pcifunc && add) { 3001 /* entry already exists */ 3002 return 0; 3003 } 3004 tail = mce; 3005 } 3006 3007 if (delete) { 3008 hlist_del(&mce->node); 3009 kfree(mce); 3010 mce_list->count--; 3011 return 0; 3012 } 3013 3014 if (!add) 3015 return 0; 3016 3017 /* Add a new one to the list, at the tail */ 3018 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3019 if (!mce) 3020 return -ENOMEM; 3021 mce->pcifunc = pcifunc; 3022 if (!tail) 3023 hlist_add_head(&mce->node, &mce_list->head); 3024 else 3025 hlist_add_behind(&mce->node, &tail->node); 3026 mce_list->count++; 3027 return 0; 3028 } 3029 3030 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3031 struct nix_mce_list *mce_list, 3032 int mce_idx, int mcam_index, bool add) 3033 { 3034 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3035 struct npc_mcam *mcam = &rvu->hw->mcam; 3036 struct nix_mcast *mcast; 3037 struct nix_hw *nix_hw; 3038 struct mce *mce; 3039 3040 if (!mce_list) 3041 return -EINVAL; 3042 3043 /* Get this PF/VF func's MCE index */ 3044 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3045 3046 if (idx > (mce_idx + mce_list->max)) { 3047 dev_err(rvu->dev, 3048 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3049 __func__, idx, mce_list->max, 3050 pcifunc >> RVU_PFVF_PF_SHIFT); 3051 return -EINVAL; 3052 } 3053 3054 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3055 if (err) 3056 return err; 3057 3058 mcast = &nix_hw->mcast; 3059 mutex_lock(&mcast->mce_lock); 3060 3061 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3062 if (err) 3063 goto end; 3064 3065 /* Disable MCAM entry in NPC */ 3066 if (!mce_list->count) { 3067 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3068 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3069 goto end; 3070 } 3071 3072 /* Dump the updated list to HW */ 3073 idx = mce_idx; 3074 last_idx = idx + mce_list->count - 1; 3075 hlist_for_each_entry(mce, &mce_list->head, node) { 3076 if (idx > last_idx) 3077 break; 3078 3079 next_idx = idx + 1; 3080 /* EOL should be set in last MCE */ 3081 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3082 mce->pcifunc, next_idx, 3083 (next_idx > last_idx) ? 
true : false); 3084 if (err) 3085 goto end; 3086 idx++; 3087 } 3088 3089 end: 3090 mutex_unlock(&mcast->mce_lock); 3091 return err; 3092 } 3093 3094 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3095 struct nix_mce_list **mce_list, int *mce_idx) 3096 { 3097 struct rvu_hwinfo *hw = rvu->hw; 3098 struct rvu_pfvf *pfvf; 3099 3100 if (!hw->cap.nix_rx_multicast || 3101 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3102 *mce_list = NULL; 3103 *mce_idx = 0; 3104 return; 3105 } 3106 3107 /* Get this PF/VF func's MCE index */ 3108 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3109 3110 if (type == NIXLF_BCAST_ENTRY) { 3111 *mce_list = &pfvf->bcast_mce_list; 3112 *mce_idx = pfvf->bcast_mce_idx; 3113 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3114 *mce_list = &pfvf->mcast_mce_list; 3115 *mce_idx = pfvf->mcast_mce_idx; 3116 } else if (type == NIXLF_PROMISC_ENTRY) { 3117 *mce_list = &pfvf->promisc_mce_list; 3118 *mce_idx = pfvf->promisc_mce_idx; 3119 } else { 3120 *mce_list = NULL; 3121 *mce_idx = 0; 3122 } 3123 } 3124 3125 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3126 int type, bool add) 3127 { 3128 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3129 struct npc_mcam *mcam = &rvu->hw->mcam; 3130 struct rvu_hwinfo *hw = rvu->hw; 3131 struct nix_mce_list *mce_list; 3132 int pf; 3133 3134 /* skip multicast pkt replication for AF's VFs & SDP links */ 3135 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 3136 return 0; 3137 3138 if (!hw->cap.nix_rx_multicast) 3139 return 0; 3140 3141 pf = rvu_get_pf(pcifunc); 3142 if (!is_pf_cgxmapped(rvu, pf)) 3143 return 0; 3144 3145 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3146 if (blkaddr < 0) 3147 return -EINVAL; 3148 3149 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3150 if (nixlf < 0) 3151 return -EINVAL; 3152 3153 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3154 3155 mcam_index = npc_get_nixlf_mcam_index(mcam, 3156 pcifunc & ~RVU_PFVF_FUNC_MASK, 3157 nixlf, type); 3158 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3159 mce_idx, mcam_index, add); 3160 return err; 3161 } 3162 3163 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3164 { 3165 struct nix_mcast *mcast = &nix_hw->mcast; 3166 int err, pf, numvfs, idx; 3167 struct rvu_pfvf *pfvf; 3168 u16 pcifunc; 3169 u64 cfg; 3170 3171 /* Skip PF0 (i.e AF) */ 3172 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3173 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3174 /* If PF is not enabled, nothing to do */ 3175 if (!((cfg >> 20) & 0x01)) 3176 continue; 3177 /* Get numVFs attached to this PF */ 3178 numvfs = (cfg >> 12) & 0xFF; 3179 3180 pfvf = &rvu->pf[pf]; 3181 3182 /* This NIX0/1 block mapped to PF ? 
*/ 3183 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3184 continue; 3185 3186 /* save start idx of broadcast mce list */ 3187 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3188 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3189 3190 /* save start idx of multicast mce list */ 3191 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3192 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3193 3194 /* save the start idx of promisc mce list */ 3195 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3196 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3197 3198 for (idx = 0; idx < (numvfs + 1); idx++) { 3199 /* idx-0 is for PF, followed by VFs */ 3200 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3201 pcifunc |= idx; 3202 /* Add dummy entries now, so that we don't have to check 3203 * for whether AQ_OP should be INIT/WRITE later on. 3204 * Will be updated when a NIXLF is attached/detached to 3205 * these PF/VFs. 3206 */ 3207 err = nix_blk_setup_mce(rvu, nix_hw, 3208 pfvf->bcast_mce_idx + idx, 3209 NIX_AQ_INSTOP_INIT, 3210 pcifunc, 0, true); 3211 if (err) 3212 return err; 3213 3214 /* add dummy entries to multicast mce list */ 3215 err = nix_blk_setup_mce(rvu, nix_hw, 3216 pfvf->mcast_mce_idx + idx, 3217 NIX_AQ_INSTOP_INIT, 3218 pcifunc, 0, true); 3219 if (err) 3220 return err; 3221 3222 /* add dummy entries to promisc mce list */ 3223 err = nix_blk_setup_mce(rvu, nix_hw, 3224 pfvf->promisc_mce_idx + idx, 3225 NIX_AQ_INSTOP_INIT, 3226 pcifunc, 0, true); 3227 if (err) 3228 return err; 3229 } 3230 } 3231 return 0; 3232 } 3233 3234 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3235 { 3236 struct nix_mcast *mcast = &nix_hw->mcast; 3237 struct rvu_hwinfo *hw = rvu->hw; 3238 int err, size; 3239 3240 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3241 size = (1ULL << size); 3242 3243 /* Alloc memory for multicast/mirror replication entries */ 3244 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3245 (256UL << MC_TBL_SIZE), size); 3246 if (err) 3247 return -ENOMEM; 3248 3249 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3250 (u64)mcast->mce_ctx->iova); 3251 3252 /* Set max list length equal to max no of VFs per PF + PF itself */ 3253 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3254 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3255 3256 /* Alloc memory for multicast replication buffers */ 3257 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3258 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3259 (8UL << MC_BUF_CNT), size); 3260 if (err) 3261 return -ENOMEM; 3262 3263 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3264 (u64)mcast->mcast_buf->iova); 3265 3266 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3267 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3268 3269 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3270 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3271 BIT_ULL(20) | MC_BUF_CNT); 3272 3273 mutex_init(&mcast->mce_lock); 3274 3275 return nix_setup_mce_tables(rvu, nix_hw); 3276 } 3277 3278 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3279 { 3280 struct nix_txvlan *vlan = &nix_hw->txvlan; 3281 int err; 3282 3283 /* Allocate resource bimap for tx vtag def registers*/ 3284 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3285 err = rvu_alloc_bitmap(&vlan->rsrc); 3286 if (err) 3287 return -ENOMEM; 3288 3289 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3290 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, 
vlan->rsrc.max, 3291 sizeof(u16), GFP_KERNEL); 3292 if (!vlan->entry2pfvf_map) 3293 goto free_mem; 3294 3295 mutex_init(&vlan->rsrc_lock); 3296 return 0; 3297 3298 free_mem: 3299 kfree(vlan->rsrc.bmap); 3300 return -ENOMEM; 3301 } 3302 3303 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3304 { 3305 struct nix_txsch *txsch; 3306 int err, lvl, schq; 3307 u64 cfg, reg; 3308 3309 /* Get scheduler queue count of each type and alloc 3310 * bitmap for each for alloc/free/attach operations. 3311 */ 3312 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3313 txsch = &nix_hw->txsch[lvl]; 3314 txsch->lvl = lvl; 3315 switch (lvl) { 3316 case NIX_TXSCH_LVL_SMQ: 3317 reg = NIX_AF_MDQ_CONST; 3318 break; 3319 case NIX_TXSCH_LVL_TL4: 3320 reg = NIX_AF_TL4_CONST; 3321 break; 3322 case NIX_TXSCH_LVL_TL3: 3323 reg = NIX_AF_TL3_CONST; 3324 break; 3325 case NIX_TXSCH_LVL_TL2: 3326 reg = NIX_AF_TL2_CONST; 3327 break; 3328 case NIX_TXSCH_LVL_TL1: 3329 reg = NIX_AF_TL1_CONST; 3330 break; 3331 } 3332 cfg = rvu_read64(rvu, blkaddr, reg); 3333 txsch->schq.max = cfg & 0xFFFF; 3334 err = rvu_alloc_bitmap(&txsch->schq); 3335 if (err) 3336 return err; 3337 3338 /* Allocate memory for scheduler queues to 3339 * PF/VF pcifunc mapping info. 3340 */ 3341 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3342 sizeof(u32), GFP_KERNEL); 3343 if (!txsch->pfvf_map) 3344 return -ENOMEM; 3345 for (schq = 0; schq < txsch->schq.max; schq++) 3346 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3347 } 3348 3349 /* Setup a default value of 8192 as DWRR MTU */ 3350 if (rvu->hw->cap.nix_common_dwrr_mtu || 3351 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3352 rvu_write64(rvu, blkaddr, 3353 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3354 convert_bytes_to_dwrr_mtu(8192)); 3355 rvu_write64(rvu, blkaddr, 3356 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3357 convert_bytes_to_dwrr_mtu(8192)); 3358 rvu_write64(rvu, blkaddr, 3359 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3360 convert_bytes_to_dwrr_mtu(8192)); 3361 } 3362 3363 return 0; 3364 } 3365 3366 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3367 int blkaddr, u32 cfg) 3368 { 3369 int fmt_idx; 3370 3371 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3372 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3373 return fmt_idx; 3374 } 3375 if (fmt_idx >= nix_hw->mark_format.total) 3376 return -ERANGE; 3377 3378 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3379 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3380 nix_hw->mark_format.in_use++; 3381 return fmt_idx; 3382 } 3383 3384 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3385 int blkaddr) 3386 { 3387 u64 cfgs[] = { 3388 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3389 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3390 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3391 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3392 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3393 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3394 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3395 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3396 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3397 }; 3398 int i, rc; 3399 u64 total; 3400 3401 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3402 nix_hw->mark_format.total = (u8)total; 3403 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3404 GFP_KERNEL); 3405 if (!nix_hw->mark_format.cfg) 3406 return -ENOMEM; 3407 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3408 rc = 
rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3409 if (rc < 0) 3410 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3411 i, rc); 3412 } 3413 3414 return 0; 3415 } 3416 3417 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3418 { 3419 /* CN10K supports LBK FIFO size 72 KB */ 3420 if (rvu->hw->lbk_bufsize == 0x12000) 3421 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3422 else 3423 *max_mtu = NIC_HW_MAX_FRS; 3424 } 3425 3426 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3427 { 3428 int fifo_size = rvu_cgx_get_fifolen(rvu); 3429 3430 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3431 * FIFO len to accommodate 8 LMACS 3432 */ 3433 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3434 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3435 else 3436 *max_mtu = NIC_HW_MAX_FRS; 3437 } 3438 3439 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3440 struct nix_hw_info *rsp) 3441 { 3442 u16 pcifunc = req->hdr.pcifunc; 3443 u64 dwrr_mtu; 3444 int blkaddr; 3445 3446 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3447 if (blkaddr < 0) 3448 return NIX_AF_ERR_AF_LF_INVALID; 3449 3450 if (is_afvf(pcifunc)) 3451 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3452 else 3453 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3454 3455 rsp->min_mtu = NIC_HW_MIN_FRS; 3456 3457 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3458 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3459 /* Return '1' on OTx2 */ 3460 rsp->rpm_dwrr_mtu = 1; 3461 rsp->sdp_dwrr_mtu = 1; 3462 rsp->lbk_dwrr_mtu = 1; 3463 return 0; 3464 } 3465 3466 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3467 dwrr_mtu = rvu_read64(rvu, blkaddr, 3468 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3469 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3470 3471 dwrr_mtu = rvu_read64(rvu, blkaddr, 3472 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3473 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3474 3475 dwrr_mtu = rvu_read64(rvu, blkaddr, 3476 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3477 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3478 3479 return 0; 3480 } 3481 3482 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3483 struct msg_rsp *rsp) 3484 { 3485 u16 pcifunc = req->hdr.pcifunc; 3486 int i, nixlf, blkaddr, err; 3487 u64 stats; 3488 3489 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3490 if (err) 3491 return err; 3492 3493 /* Get stats count supported by HW */ 3494 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3495 3496 /* Reset tx stats */ 3497 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3499 3500 /* Reset rx stats */ 3501 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3503 3504 return 0; 3505 } 3506 3507 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3508 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3509 { 3510 int i; 3511 3512 /* Scan over exiting algo entries to find a match */ 3513 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3514 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3515 return i; 3516 3517 return -ERANGE; 3518 } 3519 3520 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3521 { 3522 int idx, nr_field, key_off, field_marker, keyoff_marker; 3523 int max_key_off, max_bit_pos, group_member; 3524 struct nix_rx_flowkey_alg *field; 3525 struct nix_rx_flowkey_alg tmp; 3526 u32 key_type, 
valid_key; 3527 u32 l3_l4_src_dst; 3528 int l4_key_offset = 0; 3529 3530 if (!alg) 3531 return -EINVAL; 3532 3533 #define FIELDS_PER_ALG 5 3534 #define MAX_KEY_OFF 40 3535 /* Clear all fields */ 3536 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3537 3538 /* Each of the 32 possible flow key algorithm definitions should 3539 * fall into above incremental config (except ALG0). Otherwise a 3540 * single NPC MCAM entry is not sufficient for supporting RSS. 3541 * 3542 * If a different definition or combination needed then NPC MCAM 3543 * has to be programmed to filter such pkts and it's action should 3544 * point to this definition to calculate flowtag or hash. 3545 * 3546 * The `for loop` goes over _all_ protocol field and the following 3547 * variables depicts the state machine forward progress logic. 3548 * 3549 * keyoff_marker - Enabled when hash byte length needs to be accounted 3550 * in field->key_offset update. 3551 * field_marker - Enabled when a new field needs to be selected. 3552 * group_member - Enabled when protocol is part of a group. 3553 */ 3554 3555 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3556 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3557 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3558 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3559 */ 3560 l3_l4_src_dst = flow_cfg; 3561 /* Reset these 4 bits, so that these won't be part of key */ 3562 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3563 3564 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3565 nr_field = 0; key_off = 0; field_marker = 1; 3566 field = &tmp; max_bit_pos = fls(flow_cfg); 3567 for (idx = 0; 3568 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3569 key_off < MAX_KEY_OFF; idx++) { 3570 key_type = BIT(idx); 3571 valid_key = flow_cfg & key_type; 3572 /* Found a field marker, reset the field values */ 3573 if (field_marker) 3574 memset(&tmp, 0, sizeof(tmp)); 3575 3576 field_marker = true; 3577 keyoff_marker = true; 3578 switch (key_type) { 3579 case NIX_FLOW_KEY_TYPE_PORT: 3580 field->sel_chan = true; 3581 /* This should be set to 1, when SEL_CHAN is set */ 3582 field->bytesm1 = 1; 3583 break; 3584 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3585 field->lid = NPC_LID_LC; 3586 field->hdr_offset = 9; /* offset */ 3587 field->bytesm1 = 0; /* 1 byte */ 3588 field->ltype_match = NPC_LT_LC_IP; 3589 field->ltype_mask = 0xF; 3590 break; 3591 case NIX_FLOW_KEY_TYPE_IPV4: 3592 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3593 field->lid = NPC_LID_LC; 3594 field->ltype_match = NPC_LT_LC_IP; 3595 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3596 field->lid = NPC_LID_LG; 3597 field->ltype_match = NPC_LT_LG_TU_IP; 3598 } 3599 field->hdr_offset = 12; /* SIP offset */ 3600 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3601 3602 /* Only SIP */ 3603 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3604 field->bytesm1 = 3; /* SIP, 4 bytes */ 3605 3606 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3607 /* Both SIP + DIP */ 3608 if (field->bytesm1 == 3) { 3609 field->bytesm1 = 7; /* SIP + DIP, 8B */ 3610 } else { 3611 /* Only DIP */ 3612 field->hdr_offset = 16; /* DIP off */ 3613 field->bytesm1 = 3; /* DIP, 4 bytes */ 3614 } 3615 } 3616 3617 field->ltype_mask = 0xF; /* Match only IPv4 */ 3618 keyoff_marker = false; 3619 break; 3620 case NIX_FLOW_KEY_TYPE_IPV6: 3621 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3622 field->lid = NPC_LID_LC; 3623 field->ltype_match = NPC_LT_LC_IP6; 3624 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3625 field->lid = NPC_LID_LG; 3626 field->ltype_match = NPC_LT_LG_TU_IP6; 3627 } 
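			/* As with the IPv4 case above, hash SIP+DIP by
			 * default and narrow to a single address when only
			 * L3_SRC or L3_DST is requested; for IPv6 the
			 * addresses are 16 bytes each, at header offsets
			 * 8 (SIP) and 24 (DIP).
			 */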
3628 field->hdr_offset = 8; /* SIP offset */ 3629 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3630 3631 /* Only SIP */ 3632 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3633 field->bytesm1 = 15; /* SIP, 16 bytes */ 3634 3635 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3636 /* Both SIP + DIP */ 3637 if (field->bytesm1 == 15) { 3638 /* SIP + DIP, 32 bytes */ 3639 field->bytesm1 = 31; 3640 } else { 3641 /* Only DIP */ 3642 field->hdr_offset = 24; /* DIP off */ 3643 field->bytesm1 = 15; /* DIP,16 bytes */ 3644 } 3645 } 3646 field->ltype_mask = 0xF; /* Match only IPv6 */ 3647 break; 3648 case NIX_FLOW_KEY_TYPE_TCP: 3649 case NIX_FLOW_KEY_TYPE_UDP: 3650 case NIX_FLOW_KEY_TYPE_SCTP: 3651 case NIX_FLOW_KEY_TYPE_INNR_TCP: 3652 case NIX_FLOW_KEY_TYPE_INNR_UDP: 3653 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 3654 field->lid = NPC_LID_LD; 3655 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 3656 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 3657 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 3658 field->lid = NPC_LID_LH; 3659 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 3660 3661 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 3662 field->bytesm1 = 1; /* SRC, 2 bytes */ 3663 3664 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 3665 /* Both SRC + DST */ 3666 if (field->bytesm1 == 1) { 3667 /* SRC + DST, 4 bytes */ 3668 field->bytesm1 = 3; 3669 } else { 3670 /* Only DIP */ 3671 field->hdr_offset = 2; /* DST off */ 3672 field->bytesm1 = 1; /* DST, 2 bytes */ 3673 } 3674 } 3675 3676 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 3677 * so no need to change the ltype_match, just change 3678 * the lid for inner protocols 3679 */ 3680 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 3681 (int)NPC_LT_LH_TU_TCP); 3682 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 3683 (int)NPC_LT_LH_TU_UDP); 3684 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 3685 (int)NPC_LT_LH_TU_SCTP); 3686 3687 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 3688 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 3689 valid_key) { 3690 field->ltype_match |= NPC_LT_LD_TCP; 3691 group_member = true; 3692 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 3693 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 3694 valid_key) { 3695 field->ltype_match |= NPC_LT_LD_UDP; 3696 group_member = true; 3697 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 3698 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 3699 valid_key) { 3700 field->ltype_match |= NPC_LT_LD_SCTP; 3701 group_member = true; 3702 } 3703 field->ltype_mask = ~field->ltype_match; 3704 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 3705 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 3706 /* Handle the case where any of the group item 3707 * is enabled in the group but not the final one 3708 */ 3709 if (group_member) { 3710 valid_key = true; 3711 group_member = false; 3712 } 3713 } else { 3714 field_marker = false; 3715 keyoff_marker = false; 3716 } 3717 3718 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 3719 * remember the TCP key offset of 40 byte hash key. 
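 *
 * Note: l4_key_offset is captured when the TCP key type is visited
 * (a lower bit in flow_cfg than ESP/AH, so it is processed earlier in
 * this loop) and is reused below when ESP/AH fields are committed.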
3720 */ 3721 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 3722 l4_key_offset = key_off; 3723 break; 3724 case NIX_FLOW_KEY_TYPE_NVGRE: 3725 field->lid = NPC_LID_LD; 3726 field->hdr_offset = 4; /* VSID offset */ 3727 field->bytesm1 = 2; 3728 field->ltype_match = NPC_LT_LD_NVGRE; 3729 field->ltype_mask = 0xF; 3730 break; 3731 case NIX_FLOW_KEY_TYPE_VXLAN: 3732 case NIX_FLOW_KEY_TYPE_GENEVE: 3733 field->lid = NPC_LID_LE; 3734 field->bytesm1 = 2; 3735 field->hdr_offset = 4; 3736 field->ltype_mask = 0xF; 3737 field_marker = false; 3738 keyoff_marker = false; 3739 3740 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 3741 field->ltype_match |= NPC_LT_LE_VXLAN; 3742 group_member = true; 3743 } 3744 3745 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 3746 field->ltype_match |= NPC_LT_LE_GENEVE; 3747 group_member = true; 3748 } 3749 3750 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 3751 if (group_member) { 3752 field->ltype_mask = ~field->ltype_match; 3753 field_marker = true; 3754 keyoff_marker = true; 3755 valid_key = true; 3756 group_member = false; 3757 } 3758 } 3759 break; 3760 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 3761 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 3762 field->lid = NPC_LID_LA; 3763 field->ltype_match = NPC_LT_LA_ETHER; 3764 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 3765 field->lid = NPC_LID_LF; 3766 field->ltype_match = NPC_LT_LF_TU_ETHER; 3767 } 3768 field->hdr_offset = 0; 3769 field->bytesm1 = 5; /* DMAC 6 Byte */ 3770 field->ltype_mask = 0xF; 3771 break; 3772 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 3773 field->lid = NPC_LID_LC; 3774 field->hdr_offset = 40; /* IPV6 hdr */ 3775 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 3776 field->ltype_match = NPC_LT_LC_IP6_EXT; 3777 field->ltype_mask = 0xF; 3778 break; 3779 case NIX_FLOW_KEY_TYPE_GTPU: 3780 field->lid = NPC_LID_LE; 3781 field->hdr_offset = 4; 3782 field->bytesm1 = 3; /* 4 bytes TID*/ 3783 field->ltype_match = NPC_LT_LE_GTPU; 3784 field->ltype_mask = 0xF; 3785 break; 3786 case NIX_FLOW_KEY_TYPE_VLAN: 3787 field->lid = NPC_LID_LB; 3788 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 3789 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3790 field->ltype_match = NPC_LT_LB_CTAG; 3791 field->ltype_mask = 0xF; 3792 field->fn_mask = 1; /* Mask out the first nibble */ 3793 break; 3794 case NIX_FLOW_KEY_TYPE_AH: 3795 case NIX_FLOW_KEY_TYPE_ESP: 3796 field->hdr_offset = 0; 3797 field->bytesm1 = 7; /* SPI + sequence number */ 3798 field->ltype_mask = 0xF; 3799 field->lid = NPC_LID_LE; 3800 field->ltype_match = NPC_LT_LE_ESP; 3801 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3802 field->lid = NPC_LID_LD; 3803 field->ltype_match = NPC_LT_LD_AH; 3804 field->hdr_offset = 4; 3805 keyoff_marker = false; 3806 } 3807 break; 3808 } 3809 field->ena = 1; 3810 3811 /* Found a valid flow key type */ 3812 if (valid_key) { 3813 /* Use the key offset of TCP/UDP/SCTP fields 3814 * for ESP/AH fields. 
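 *
 * Worked example (illustrative): for flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
 * NIX_FLOW_KEY_TYPE_TCP this loop ends up with field 0 covering
 * SIP+DIP (8 bytes at key offset 0) and field 1 covering the TCP
 * source/destination ports (4 bytes at key offset 8), i.e. a 12-byte
 * hash key well within the 40-byte (MAX_KEY_OFF) budget.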
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}

static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* No room to add a new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}

int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}

static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3934 if (rc < 0) 3935 return rc; 3936 3937 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3938 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3939 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3940 if (rc < 0) 3941 return rc; 3942 3943 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3944 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3945 NIX_FLOW_KEY_TYPE_UDP; 3946 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3947 if (rc < 0) 3948 return rc; 3949 3950 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3951 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3952 NIX_FLOW_KEY_TYPE_SCTP; 3953 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3954 if (rc < 0) 3955 return rc; 3956 3957 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3958 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3959 NIX_FLOW_KEY_TYPE_SCTP; 3960 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3961 if (rc < 0) 3962 return rc; 3963 3964 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3965 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3966 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3967 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3968 if (rc < 0) 3969 return rc; 3970 3971 return 0; 3972 } 3973 3974 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3975 struct nix_set_mac_addr *req, 3976 struct msg_rsp *rsp) 3977 { 3978 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3979 u16 pcifunc = req->hdr.pcifunc; 3980 int blkaddr, nixlf, err; 3981 struct rvu_pfvf *pfvf; 3982 3983 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3984 if (err) 3985 return err; 3986 3987 pfvf = rvu_get_pfvf(rvu, pcifunc); 3988 3989 /* untrusted VF can't overwrite admin(PF) changes */ 3990 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3991 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3992 dev_warn(rvu->dev, 3993 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3994 return -EPERM; 3995 } 3996 3997 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3998 3999 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 4000 pfvf->rx_chan_base, req->mac_addr); 4001 4002 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4003 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4004 4005 rvu_switch_update_rules(rvu, pcifunc); 4006 4007 return 0; 4008 } 4009 4010 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4011 struct msg_req *req, 4012 struct nix_get_mac_addr_rsp *rsp) 4013 { 4014 u16 pcifunc = req->hdr.pcifunc; 4015 struct rvu_pfvf *pfvf; 4016 4017 if (!is_nixlf_attached(rvu, pcifunc)) 4018 return NIX_AF_ERR_AF_LF_INVALID; 4019 4020 pfvf = rvu_get_pfvf(rvu, pcifunc); 4021 4022 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4023 4024 return 0; 4025 } 4026 4027 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4028 struct msg_rsp *rsp) 4029 { 4030 bool allmulti, promisc, nix_rx_multicast; 4031 u16 pcifunc = req->hdr.pcifunc; 4032 struct rvu_pfvf *pfvf; 4033 int nixlf, err; 4034 4035 pfvf = rvu_get_pfvf(rvu, pcifunc); 4036 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4037 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4038 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4039 4040 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4041 4042 if (is_vf(pcifunc) && !nix_rx_multicast && 4043 (promisc || allmulti)) { 4044 dev_warn_ratelimited(rvu->dev, 4045 "VF promisc/multicast not supported\n"); 4046 return 0; 4047 } 4048 4049 /* untrusted VF can't configure promisc/allmulti */ 4050 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4051 (promisc || allmulti)) 4052 return 0; 4053 4054 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4055 if (err) 4056 return err; 4057 4058 if (nix_rx_multicast) { 4059 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4060 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4061 allmulti); 4062 if (err) { 4063 dev_err(rvu->dev, 4064 "Failed to update pcifunc 0x%x to multicast list\n", 4065 pcifunc); 4066 return err; 4067 } 4068 4069 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4070 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4071 promisc); 4072 if (err) { 4073 dev_err(rvu->dev, 4074 "Failed to update pcifunc 0x%x to promisc list\n", 4075 pcifunc); 4076 return err; 4077 } 4078 } 4079 4080 /* install/uninstall allmulti entry */ 4081 if (allmulti) { 4082 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4083 pfvf->rx_chan_base); 4084 } else { 4085 if (!nix_rx_multicast) 4086 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4087 } 4088 4089 /* install/uninstall promisc entry */ 4090 if (promisc) 4091 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4092 pfvf->rx_chan_base, 4093 pfvf->rx_chan_cnt); 4094 else 4095 if (!nix_rx_multicast) 4096 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4097 4098 return 0; 4099 } 4100 4101 static void nix_find_link_frs(struct rvu *rvu, 4102 struct nix_frs_cfg *req, u16 pcifunc) 4103 { 4104 int pf = rvu_get_pf(pcifunc); 4105 struct rvu_pfvf *pfvf; 4106 int maxlen, minlen; 4107 int numvfs, hwvf; 4108 int vf; 4109 4110 /* Update with requester's min/max lengths */ 4111 pfvf = rvu_get_pfvf(rvu, pcifunc); 4112 pfvf->maxlen = req->maxlen; 4113 if (req->update_minlen) 4114 pfvf->minlen = req->minlen; 4115 4116 maxlen = req->maxlen; 4117 minlen = req->update_minlen ? 
req->minlen : 0; 4118 4119 /* Get this PF's numVFs and starting hwvf */ 4120 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4121 4122 /* For each VF, compare requested max/minlen */ 4123 for (vf = 0; vf < numvfs; vf++) { 4124 pfvf = &rvu->hwvf[hwvf + vf]; 4125 if (pfvf->maxlen > maxlen) 4126 maxlen = pfvf->maxlen; 4127 if (req->update_minlen && 4128 pfvf->minlen && pfvf->minlen < minlen) 4129 minlen = pfvf->minlen; 4130 } 4131 4132 /* Compare requested max/minlen with PF's max/minlen */ 4133 pfvf = &rvu->pf[pf]; 4134 if (pfvf->maxlen > maxlen) 4135 maxlen = pfvf->maxlen; 4136 if (req->update_minlen && 4137 pfvf->minlen && pfvf->minlen < minlen) 4138 minlen = pfvf->minlen; 4139 4140 /* Update the request with max/min PF's and it's VF's max/min */ 4141 req->maxlen = maxlen; 4142 if (req->update_minlen) 4143 req->minlen = minlen; 4144 } 4145 4146 static int 4147 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, 4148 u16 pcifunc, u64 tx_credits) 4149 { 4150 struct rvu_hwinfo *hw = rvu->hw; 4151 int pf = rvu_get_pf(pcifunc); 4152 u8 cgx_id = 0, lmac_id = 0; 4153 unsigned long poll_tmo; 4154 bool restore_tx_en = 0; 4155 struct nix_hw *nix_hw; 4156 u64 cfg, sw_xoff = 0; 4157 u32 schq = 0; 4158 u32 credits; 4159 int rc; 4160 4161 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4162 if (!nix_hw) 4163 return NIX_AF_ERR_INVALID_NIXBLK; 4164 4165 if (tx_credits == nix_hw->tx_credits[link]) 4166 return 0; 4167 4168 /* Enable cgx tx if disabled for credits to be back */ 4169 if (is_pf_cgxmapped(rvu, pf)) { 4170 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4171 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 4172 lmac_id, true); 4173 } 4174 4175 mutex_lock(&rvu->rsrc_lock); 4176 /* Disable new traffic to link */ 4177 if (hw->cap.nix_shaping) { 4178 schq = nix_get_tx_link(rvu, pcifunc); 4179 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); 4180 rvu_write64(rvu, blkaddr, 4181 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); 4182 } 4183 4184 rc = NIX_AF_ERR_LINK_CREDITS; 4185 poll_tmo = jiffies + usecs_to_jiffies(200000); 4186 /* Wait for credits to return */ 4187 do { 4188 if (time_after(jiffies, poll_tmo)) 4189 goto exit; 4190 usleep_range(100, 200); 4191 4192 cfg = rvu_read64(rvu, blkaddr, 4193 NIX_AF_TX_LINKX_NORM_CREDIT(link)); 4194 credits = (cfg >> 12) & 0xFFFFFULL; 4195 } while (credits != nix_hw->tx_credits[link]); 4196 4197 cfg &= ~(0xFFFFFULL << 12); 4198 cfg |= (tx_credits << 12); 4199 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4200 rc = 0; 4201 4202 nix_hw->tx_credits[link] = tx_credits; 4203 4204 exit: 4205 /* Enable traffic back */ 4206 if (hw->cap.nix_shaping && !sw_xoff) 4207 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); 4208 4209 /* Restore state of cgx tx */ 4210 if (restore_tx_en) 4211 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 4212 4213 mutex_unlock(&rvu->rsrc_lock); 4214 return rc; 4215 } 4216 4217 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4218 struct msg_rsp *rsp) 4219 { 4220 struct rvu_hwinfo *hw = rvu->hw; 4221 u16 pcifunc = req->hdr.pcifunc; 4222 int pf = rvu_get_pf(pcifunc); 4223 int blkaddr, schq, link = -1; 4224 struct nix_txsch *txsch; 4225 u64 cfg, lmac_fifo_len; 4226 struct nix_hw *nix_hw; 4227 struct rvu_pfvf *pfvf; 4228 u8 cgx = 0, lmac = 0; 4229 u16 max_mtu; 4230 4231 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4232 if (blkaddr < 0) 4233 return NIX_AF_ERR_AF_LF_INVALID; 4234 4235 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4236 if 
(!nix_hw) 4237 return NIX_AF_ERR_INVALID_NIXBLK; 4238 4239 if (is_afvf(pcifunc)) 4240 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4241 else 4242 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4243 4244 if (!req->sdp_link && req->maxlen > max_mtu) 4245 return NIX_AF_ERR_FRS_INVALID; 4246 4247 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4248 return NIX_AF_ERR_FRS_INVALID; 4249 4250 /* Check if requester wants to update SMQ's */ 4251 if (!req->update_smq) 4252 goto rx_frscfg; 4253 4254 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 4255 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 4256 mutex_lock(&rvu->rsrc_lock); 4257 for (schq = 0; schq < txsch->schq.max; schq++) { 4258 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 4259 continue; 4260 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 4261 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 4262 if (req->update_minlen) 4263 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 4264 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 4265 } 4266 mutex_unlock(&rvu->rsrc_lock); 4267 4268 rx_frscfg: 4269 /* Check if config is for SDP link */ 4270 if (req->sdp_link) { 4271 if (!hw->sdp_links) 4272 return NIX_AF_ERR_RX_LINK_INVALID; 4273 link = hw->cgx_links + hw->lbk_links; 4274 goto linkcfg; 4275 } 4276 4277 /* Check if the request is from CGX mapped RVU PF */ 4278 if (is_pf_cgxmapped(rvu, pf)) { 4279 /* Get CGX and LMAC to which this PF is mapped and find link */ 4280 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4281 link = (cgx * hw->lmac_per_cgx) + lmac; 4282 } else if (pf == 0) { 4283 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4284 pfvf = rvu_get_pfvf(rvu, pcifunc); 4285 link = hw->cgx_links + pfvf->lbkid; 4286 } 4287 4288 if (link < 0) 4289 return NIX_AF_ERR_RX_LINK_INVALID; 4290 4291 4292 linkcfg: 4293 nix_find_link_frs(rvu, req, pcifunc); 4294 4295 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4296 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4297 if (req->update_minlen) 4298 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4299 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4300 4301 if (req->sdp_link || pf == 0) 4302 return 0; 4303 4304 /* Update transmit credits for CGX links */ 4305 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); 4306 if (!lmac_fifo_len) { 4307 dev_err(rvu->dev, 4308 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4309 __func__, cgx, lmac); 4310 return 0; 4311 } 4312 return nix_config_link_credits(rvu, blkaddr, link, pcifunc, 4313 (lmac_fifo_len - req->maxlen) / 16); 4314 } 4315 4316 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4317 struct msg_rsp *rsp) 4318 { 4319 int nixlf, blkaddr, err; 4320 u64 cfg; 4321 4322 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4323 if (err) 4324 return err; 4325 4326 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4327 /* Set the interface configuration */ 4328 if (req->len_verify & BIT(0)) 4329 cfg |= BIT_ULL(41); 4330 else 4331 cfg &= ~BIT_ULL(41); 4332 4333 if (req->len_verify & BIT(1)) 4334 cfg |= BIT_ULL(40); 4335 else 4336 cfg &= ~BIT_ULL(40); 4337 4338 if (req->len_verify & NIX_RX_DROP_RE) 4339 cfg |= BIT_ULL(32); 4340 else 4341 cfg &= ~BIT_ULL(32); 4342 4343 if (req->csum_verify & BIT(0)) 4344 cfg |= BIT_ULL(37); 4345 else 4346 cfg &= ~BIT_ULL(37); 4347 4348 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4349 4350 return 0; 4351 } 4352 4353 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, 
u16 lbk_max_frs) 4354 { 4355 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4356 } 4357 4358 static void nix_link_config(struct rvu *rvu, int blkaddr, 4359 struct nix_hw *nix_hw) 4360 { 4361 struct rvu_hwinfo *hw = rvu->hw; 4362 int cgx, lmac_cnt, slink, link; 4363 u16 lbk_max_frs, lmac_max_frs; 4364 unsigned long lmac_bmap; 4365 u64 tx_credits, cfg; 4366 u64 lmac_fifo_len; 4367 int iter; 4368 4369 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4370 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4371 4372 /* Set default min/max packet lengths allowed on NIX Rx links. 4373 * 4374 * With HW reset minlen value of 60byte, HW will treat ARP pkts 4375 * as undersize and report them to SW as error pkts, hence 4376 * setting it to 40 bytes. 4377 */ 4378 for (link = 0; link < hw->cgx_links; link++) { 4379 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4380 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4381 } 4382 4383 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 4384 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4385 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4386 } 4387 if (hw->sdp_links) { 4388 link = hw->cgx_links + hw->lbk_links; 4389 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4390 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 4391 } 4392 4393 /* Get MCS external bypass status for CN10K-B */ 4394 if (mcs_get_blkcnt() == 1) { 4395 /* Adjust for 2 credits when external bypass is disabled */ 4396 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; 4397 } 4398 4399 /* Set credits for Tx links assuming max packet length allowed. 4400 * This will be reconfigured based on MTU set for PF/VF. 4401 */ 4402 for (cgx = 0; cgx < hw->cgx; cgx++) { 4403 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4404 /* Skip when cgx is not available or lmac cnt is zero */ 4405 if (lmac_cnt <= 0) 4406 continue; 4407 slink = cgx * hw->lmac_per_cgx; 4408 4409 /* Get LMAC id's from bitmap */ 4410 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4411 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4412 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4413 if (!lmac_fifo_len) { 4414 dev_err(rvu->dev, 4415 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4416 __func__, cgx, iter); 4417 continue; 4418 } 4419 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4420 /* Enable credits and set credit pkt count to max allowed */ 4421 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4422 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); 4423 4424 link = iter + slink; 4425 nix_hw->tx_credits[link] = tx_credits; 4426 rvu_write64(rvu, blkaddr, 4427 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4428 } 4429 } 4430 4431 /* Set Tx credits for LBK link */ 4432 slink = hw->cgx_links; 4433 for (link = slink; link < (slink + hw->lbk_links); link++) { 4434 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4435 nix_hw->tx_credits[link] = tx_credits; 4436 /* Enable credits and set credit pkt count to max allowed */ 4437 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4438 rvu_write64(rvu, blkaddr, 4439 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4440 } 4441 } 4442 4443 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4444 { 4445 int idx, err; 4446 u64 status; 4447 4448 /* Start X2P bus calibration */ 4449 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4450 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4451 /* Wait for calibration to complete */ 4452 err = rvu_poll_reg(rvu, blkaddr, 4453 NIX_AF_STATUS, BIT_ULL(10), false); 4454 if (err) 
{ 4455 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4456 return err; 4457 } 4458 4459 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4460 /* Check if CGX devices are ready */ 4461 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4462 /* Skip when cgx port is not available */ 4463 if (!rvu_cgx_pdata(idx, rvu) || 4464 (status & (BIT_ULL(16 + idx)))) 4465 continue; 4466 dev_err(rvu->dev, 4467 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4468 err = -EBUSY; 4469 } 4470 4471 /* Check if LBK is ready */ 4472 if (!(status & BIT_ULL(19))) { 4473 dev_err(rvu->dev, 4474 "LBK didn't respond to NIX X2P calibration\n"); 4475 err = -EBUSY; 4476 } 4477 4478 /* Clear 'calibrate_x2p' bit */ 4479 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4480 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4481 if (err || (status & 0x3FFULL)) 4482 dev_err(rvu->dev, 4483 "NIX X2P calibration failed, status 0x%llx\n", status); 4484 if (err) 4485 return err; 4486 return 0; 4487 } 4488 4489 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4490 { 4491 u64 cfg; 4492 int err; 4493 4494 /* Set admin queue endianness */ 4495 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4496 #ifdef __BIG_ENDIAN 4497 cfg |= BIT_ULL(8); 4498 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4499 #else 4500 cfg &= ~BIT_ULL(8); 4501 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4502 #endif 4503 4504 /* Do not bypass NDC cache */ 4505 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4506 cfg &= ~0x3FFEULL; 4507 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4508 /* Disable caching of SQB aka SQEs */ 4509 cfg |= 0x04ULL; 4510 #endif 4511 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4512 4513 /* Result structure can be followed by RQ/SQ/CQ context at 4514 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on 4515 * operation type. Alloc sufficient result memory for all operations. 4516 */ 4517 err = rvu_aq_alloc(rvu, &block->aq, 4518 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4519 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4520 if (err) 4521 return err; 4522 4523 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4524 rvu_write64(rvu, block->addr, 4525 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4526 return 0; 4527 } 4528 4529 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4530 { 4531 struct rvu_hwinfo *hw = rvu->hw; 4532 u64 hw_const; 4533 4534 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4535 4536 /* On OcteonTx2 DWRR quantum is directly configured into each of 4537 * the transmit scheduler queues. And PF/VF drivers were free to 4538 * config any value up to 2^24. 4539 * On CN10K, HW is modified, the quantum configuration at scheduler 4540 * queues is in terms of weight. And SW needs to set up a base DWRR MTU 4541 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do 4542 * 'DWRR MTU * weight' to get the quantum. 4543 * 4544 * Check if HW uses a common MTU for all DWRR quantum configs. 4545 * On OcteonTx2 this register field is '0'.
4546 */ 4547 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) 4548 hw->cap.nix_common_dwrr_mtu = true; 4549 4550 if (hw_const & BIT_ULL(61)) 4551 hw->cap.nix_multiple_dwrr_mtu = true; 4552 } 4553 4554 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4555 { 4556 const struct npc_lt_def_cfg *ltdefs; 4557 struct rvu_hwinfo *hw = rvu->hw; 4558 int blkaddr = nix_hw->blkaddr; 4559 struct rvu_block *block; 4560 int err; 4561 u64 cfg; 4562 4563 block = &hw->block[blkaddr]; 4564 4565 if (is_rvu_96xx_B0(rvu)) { 4566 /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt 4567 * internal state when conditional clocks are turned off. 4568 * Hence enable them. 4569 */ 4570 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4571 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4572 4573 /* Set chan/link to backpressure TL3 instead of TL2 */ 4574 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4575 4576 /* Disable SQ manager's sticky mode operation (set TM6 = 0). 4577 * This sticky mode is known to cause SQ stalls when multiple 4578 * SQs are mapped to the same SMQ and transmit pkts at the same time. 4579 */ 4580 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4581 cfg &= ~BIT_ULL(15); 4582 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4583 } 4584 4585 ltdefs = rvu->kpu.lt_def; 4586 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4587 err = nix_calibrate_x2p(rvu, blkaddr); 4588 if (err) 4589 return err; 4590 4591 /* Set up capabilities of the NIX block */ 4592 rvu_nix_setup_capabilities(rvu, blkaddr); 4593 4594 /* Initialize admin queue */ 4595 err = nix_aq_init(rvu, block); 4596 if (err) 4597 return err; 4598 4599 /* Restore CINT timer delay to HW reset values */ 4600 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4601 4602 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4603 4604 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs */ 4605 cfg |= 1ULL; 4606 if (!is_rvu_otx2(rvu)) 4607 cfg |= NIX_PTP_1STEP_EN; 4608 4609 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4610 4611 if (!is_rvu_otx2(rvu)) 4612 rvu_nix_block_cn10k_init(rvu, nix_hw); 4613 4614 if (is_block_implemented(hw, blkaddr)) { 4615 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4616 if (err) 4617 return err; 4618 4619 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4620 if (err) 4621 return err; 4622 4623 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4624 if (err) 4625 return err; 4626 4627 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4628 if (err) 4629 return err; 4630 4631 err = nix_setup_txvlan(rvu, nix_hw); 4632 if (err) 4633 return err; 4634 4635 /* Configure segmentation offload formats */ 4636 nix_setup_lso(rvu, nix_hw, blkaddr); 4637 4638 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4639 * This helps the HW protocol checker to identify headers 4640 * and validate lengths and checksums.
4641 */ 4642 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4643 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4644 ltdefs->rx_ol2.ltype_mask); 4645 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4646 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4647 ltdefs->rx_oip4.ltype_mask); 4648 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4649 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4650 ltdefs->rx_iip4.ltype_mask); 4651 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4652 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4653 ltdefs->rx_oip6.ltype_mask); 4654 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4655 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4656 ltdefs->rx_iip6.ltype_mask); 4657 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4658 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4659 ltdefs->rx_otcp.ltype_mask); 4660 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4661 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4662 ltdefs->rx_itcp.ltype_mask); 4663 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4664 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4665 ltdefs->rx_oudp.ltype_mask); 4666 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4667 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4668 ltdefs->rx_iudp.ltype_mask); 4669 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4670 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4671 ltdefs->rx_osctp.ltype_mask); 4672 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4673 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4674 ltdefs->rx_isctp.ltype_mask); 4675 4676 if (!is_rvu_otx2(rvu)) { 4677 /* Enable APAD calculation for other protocols 4678 * matching APAD0 and APAD1 lt def registers. 4679 */ 4680 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 4681 (ltdefs->rx_apad0.valid << 11) | 4682 (ltdefs->rx_apad0.lid << 8) | 4683 (ltdefs->rx_apad0.ltype_match << 4) | 4684 ltdefs->rx_apad0.ltype_mask); 4685 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 4686 (ltdefs->rx_apad1.valid << 11) | 4687 (ltdefs->rx_apad1.lid << 8) | 4688 (ltdefs->rx_apad1.ltype_match << 4) | 4689 ltdefs->rx_apad1.ltype_mask); 4690 4691 /* Receive ethertype definition register defines layer 4692 * information in NPC_RESULT_S to identify the Ethertype 4693 * location in the L2 header. Used for Ethertype overwriting 4694 * in inline IPsec flow.
4695 */ 4696 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 4697 (ltdefs->rx_et[0].offset << 12) | 4698 (ltdefs->rx_et[0].valid << 11) | 4699 (ltdefs->rx_et[0].lid << 8) | 4700 (ltdefs->rx_et[0].ltype_match << 4) | 4701 ltdefs->rx_et[0].ltype_mask); 4702 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 4703 (ltdefs->rx_et[1].offset << 12) | 4704 (ltdefs->rx_et[1].valid << 11) | 4705 (ltdefs->rx_et[1].lid << 8) | 4706 (ltdefs->rx_et[1].ltype_match << 4) | 4707 ltdefs->rx_et[1].ltype_mask); 4708 } 4709 4710 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 4711 if (err) 4712 return err; 4713 4714 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 4715 sizeof(u64), GFP_KERNEL); 4716 if (!nix_hw->tx_credits) 4717 return -ENOMEM; 4718 4719 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 4720 nix_link_config(rvu, blkaddr, nix_hw); 4721 4722 /* Enable Channel backpressure */ 4723 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 4724 } 4725 return 0; 4726 } 4727 4728 int rvu_nix_init(struct rvu *rvu) 4729 { 4730 struct rvu_hwinfo *hw = rvu->hw; 4731 struct nix_hw *nix_hw; 4732 int blkaddr = 0, err; 4733 int i = 0; 4734 4735 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 4736 GFP_KERNEL); 4737 if (!hw->nix) 4738 return -ENOMEM; 4739 4740 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4741 while (blkaddr) { 4742 nix_hw = &hw->nix[i]; 4743 nix_hw->rvu = rvu; 4744 nix_hw->blkaddr = blkaddr; 4745 err = rvu_nix_block_init(rvu, nix_hw); 4746 if (err) 4747 return err; 4748 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4749 i++; 4750 } 4751 4752 return 0; 4753 } 4754 4755 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 4756 struct rvu_block *block) 4757 { 4758 struct nix_txsch *txsch; 4759 struct nix_mcast *mcast; 4760 struct nix_txvlan *vlan; 4761 struct nix_hw *nix_hw; 4762 int lvl; 4763 4764 rvu_aq_free(rvu, block->aq); 4765 4766 if (is_block_implemented(rvu->hw, blkaddr)) { 4767 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4768 if (!nix_hw) 4769 return; 4770 4771 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 4772 txsch = &nix_hw->txsch[lvl]; 4773 kfree(txsch->schq.bmap); 4774 } 4775 4776 kfree(nix_hw->tx_credits); 4777 4778 nix_ipolicer_freemem(rvu, nix_hw); 4779 4780 vlan = &nix_hw->txvlan; 4781 kfree(vlan->rsrc.bmap); 4782 mutex_destroy(&vlan->rsrc_lock); 4783 4784 mcast = &nix_hw->mcast; 4785 qmem_free(rvu->dev, mcast->mce_ctx); 4786 qmem_free(rvu->dev, mcast->mcast_buf); 4787 mutex_destroy(&mcast->mce_lock); 4788 } 4789 } 4790 4791 void rvu_nix_freemem(struct rvu *rvu) 4792 { 4793 struct rvu_hwinfo *hw = rvu->hw; 4794 struct rvu_block *block; 4795 int blkaddr = 0; 4796 4797 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4798 while (blkaddr) { 4799 block = &hw->block[blkaddr]; 4800 rvu_nix_block_freemem(rvu, blkaddr, block); 4801 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4802 } 4803 } 4804 4805 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 4806 struct msg_rsp *rsp) 4807 { 4808 u16 pcifunc = req->hdr.pcifunc; 4809 struct rvu_pfvf *pfvf; 4810 int nixlf, err; 4811 4812 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4813 if (err) 4814 return err; 4815 4816 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 4817 4818 npc_mcam_enable_flows(rvu, pcifunc); 4819 4820 pfvf = rvu_get_pfvf(rvu, pcifunc); 4821 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 4822 4823 rvu_switch_update_rules(rvu, pcifunc); 4824 4825 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 4826 } 4827 4828 int 
rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 4829 struct msg_rsp *rsp) 4830 { 4831 u16 pcifunc = req->hdr.pcifunc; 4832 struct rvu_pfvf *pfvf; 4833 int nixlf, err; 4834 4835 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4836 if (err) 4837 return err; 4838 4839 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4840 4841 pfvf = rvu_get_pfvf(rvu, pcifunc); 4842 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4843 4844 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 4845 } 4846 4847 #define RX_SA_BASE GENMASK_ULL(52, 7) 4848 4849 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 4850 { 4851 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 4852 struct hwctx_disable_req ctx_req; 4853 int pf = rvu_get_pf(pcifunc); 4854 struct mac_ops *mac_ops; 4855 u8 cgx_id, lmac_id; 4856 u64 sa_base; 4857 void *cgxd; 4858 int err; 4859 4860 ctx_req.hdr.pcifunc = pcifunc; 4861 4862 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 4863 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4864 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 4865 nix_interface_deinit(rvu, pcifunc, nixlf); 4866 nix_rx_sync(rvu, blkaddr); 4867 nix_txschq_free(rvu, pcifunc); 4868 4869 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4870 4871 rvu_cgx_start_stop_io(rvu, pcifunc, false); 4872 4873 if (pfvf->sq_ctx) { 4874 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 4875 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4876 if (err) 4877 dev_err(rvu->dev, "SQ ctx disable failed\n"); 4878 } 4879 4880 if (pfvf->rq_ctx) { 4881 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 4882 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4883 if (err) 4884 dev_err(rvu->dev, "RQ ctx disable failed\n"); 4885 } 4886 4887 if (pfvf->cq_ctx) { 4888 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 4889 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4890 if (err) 4891 dev_err(rvu->dev, "CQ ctx disable failed\n"); 4892 } 4893 4894 /* reset HW config done for Switch headers */ 4895 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 4896 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 4897 4898 /* Disabling CGX and NPC config done for PTP */ 4899 if (pfvf->hw_rx_tstamp_en) { 4900 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4901 cgxd = rvu_cgx_pdata(cgx_id, rvu); 4902 mac_ops = get_mac_ops(cgxd); 4903 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 4904 /* Undo NPC config done for PTP */ 4905 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 4906 dev_err(rvu->dev, "NPC config for PTP failed\n"); 4907 pfvf->hw_rx_tstamp_en = false; 4908 } 4909 4910 /* reset priority flow control config */ 4911 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 4912 4913 /* reset 802.3x flow control config */ 4914 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 4915 4916 nix_ctx_free(rvu, pfvf); 4917 4918 nix_free_all_bandprof(rvu, pcifunc); 4919 4920 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 4921 if (FIELD_GET(RX_SA_BASE, sa_base)) { 4922 err = rvu_cpt_ctx_flush(rvu, pcifunc); 4923 if (err) 4924 dev_err(rvu->dev, 4925 "CPT ctx flush failed with error: %d\n", err); 4926 } 4927 } 4928 4929 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 4930 4931 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 4932 { 4933 struct rvu_hwinfo *hw = rvu->hw; 4934 struct rvu_block *block; 4935 int blkaddr, pf; 4936 int nixlf; 4937 u64 cfg; 4938 4939 pf = rvu_get_pf(pcifunc); 4940 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 4941 return 0; 4942 4943 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 
pcifunc); 4944 if (blkaddr < 0) 4945 return NIX_AF_ERR_AF_LF_INVALID; 4946 4947 block = &hw->block[blkaddr]; 4948 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 4949 if (nixlf < 0) 4950 return NIX_AF_ERR_AF_LF_INVALID; 4951 4952 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 4953 4954 if (enable) 4955 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 4956 else 4957 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 4958 4959 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 4960 4961 return 0; 4962 } 4963 4964 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 4965 struct msg_rsp *rsp) 4966 { 4967 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 4968 } 4969 4970 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 4971 struct msg_rsp *rsp) 4972 { 4973 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4974 } 4975 4976 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4977 struct nix_lso_format_cfg *req, 4978 struct nix_lso_format_cfg_rsp *rsp) 4979 { 4980 u16 pcifunc = req->hdr.pcifunc; 4981 struct nix_hw *nix_hw; 4982 struct rvu_pfvf *pfvf; 4983 int blkaddr, idx, f; 4984 u64 reg; 4985 4986 pfvf = rvu_get_pfvf(rvu, pcifunc); 4987 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4988 if (!pfvf->nixlf || blkaddr < 0) 4989 return NIX_AF_ERR_AF_LF_INVALID; 4990 4991 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4992 if (!nix_hw) 4993 return NIX_AF_ERR_INVALID_NIXBLK; 4994 4995 /* Find existing matching LSO format, if any */ 4996 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4997 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4998 reg = rvu_read64(rvu, blkaddr, 4999 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 5000 if (req->fields[f] != (reg & req->field_mask)) 5001 break; 5002 } 5003 5004 if (f == NIX_LSO_FIELD_MAX) 5005 break; 5006 } 5007 5008 if (idx < nix_hw->lso.in_use) { 5009 /* Match found */ 5010 rsp->lso_format_idx = idx; 5011 return 0; 5012 } 5013 5014 if (nix_hw->lso.in_use == nix_hw->lso.total) 5015 return NIX_AF_ERR_LSO_CFG_FAIL; 5016 5017 rsp->lso_format_idx = nix_hw->lso.in_use++; 5018 5019 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 5020 rvu_write64(rvu, blkaddr, 5021 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 5022 req->fields[f]); 5023 5024 return 0; 5025 } 5026 5027 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 5028 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 5029 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 5030 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 5031 5032 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 5033 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 5034 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 5035 5036 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 5037 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 5038 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 5039 5040 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 5041 int blkaddr) 5042 { 5043 u8 cpt_idx, cpt_blkaddr; 5044 u64 val; 5045 5046 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 
0 : 1; 5047 if (req->enable) { 5048 val = 0; 5049 /* Enable context prefetching */ 5050 if (!is_rvu_otx2(rvu)) 5051 val |= BIT_ULL(51); 5052 5053 /* Set OPCODE and EGRP */ 5054 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 5055 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 5056 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 5057 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 5058 5059 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5060 5061 /* Set CPT queue for inline IPSec */ 5062 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5063 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5064 req->inst_qsel.cpt_pf_func); 5065 5066 if (!is_rvu_otx2(rvu)) { 5067 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : 5068 BLKADDR_CPT1; 5069 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5070 } 5071 5072 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5073 val); 5074 5075 /* Set CPT credit */ 5076 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5077 if ((val & 0x3FFFFF) != 0x3FFFFF) 5078 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5079 0x3FFFFF - val); 5080 5081 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5082 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5083 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5084 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5085 } else { 5086 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5087 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5088 0x0); 5089 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5090 if ((val & 0x3FFFFF) != 0x3FFFFF) 5091 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5092 0x3FFFFF - val); 5093 } 5094 } 5095 5096 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5097 struct nix_inline_ipsec_cfg *req, 5098 struct msg_rsp *rsp) 5099 { 5100 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5101 return 0; 5102 5103 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5104 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5105 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5106 5107 return 0; 5108 } 5109 5110 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5111 struct msg_req *req, 5112 struct nix_inline_ipsec_cfg *rsp) 5113 5114 { 5115 u64 val; 5116 5117 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5118 return 0; 5119 5120 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5121 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5122 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5123 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5124 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5125 5126 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5127 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5128 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5129 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5130 5131 return 0; 5132 } 5133 5134 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5135 struct nix_inline_ipsec_lf_cfg *req, 5136 struct msg_rsp *rsp) 5137 { 5138 int lf, blkaddr, err; 5139 u64 val; 5140 5141 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5142 return 0; 5143 5144 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5145 if (err) 5146 return err; 5147 5148 if (req->enable) { 5149 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5150 val = (u64)req->ipsec_cfg0.tt << 44 | 5151 (u64)req->ipsec_cfg0.tag_const 
<< 20 | 5152 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5153 req->ipsec_cfg0.lenm1_max; 5154 5155 if (blkaddr == BLKADDR_NIX1) 5156 val |= BIT_ULL(46); 5157 5158 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5159 5160 /* Set SA_IDX_W and SA_IDX_MAX */ 5161 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5162 req->ipsec_cfg1.sa_idx_max; 5163 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5164 5165 /* Set SA base address */ 5166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5167 req->sa_base_addr); 5168 } else { 5169 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5172 0x0); 5173 } 5174 5175 return 0; 5176 } 5177 5178 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5179 { 5180 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5181 5182 /* overwrite vf mac address with default_mac */ 5183 if (from_vf) 5184 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5185 } 5186 5187 /* NIX ingress policers or bandwidth profiles APIs */ 5188 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5189 { 5190 struct npc_lt_def_cfg defs, *ltdefs; 5191 5192 ltdefs = &defs; 5193 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5194 5195 /* Extract PCP and DEI fields from outer VLAN from byte offset 5196 * 2 from the start of LB_PTR (ie TAG). 5197 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5198 * fields are considered when 'Tunnel enable' is set in profile. 5199 */ 5200 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5201 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5202 (ltdefs->ovlan.ltype_match << 4) | 5203 ltdefs->ovlan.ltype_mask); 5204 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5205 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5206 (ltdefs->ivlan.ltype_match << 4) | 5207 ltdefs->ivlan.ltype_mask); 5208 5209 /* DSCP field in outer and tunneled IPv4 packets */ 5210 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5211 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5212 (ltdefs->rx_oip4.ltype_match << 4) | 5213 ltdefs->rx_oip4.ltype_mask); 5214 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5215 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5216 (ltdefs->rx_iip4.ltype_match << 4) | 5217 ltdefs->rx_iip4.ltype_mask); 5218 5219 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5220 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5221 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5222 (ltdefs->rx_oip6.ltype_match << 4) | 5223 ltdefs->rx_oip6.ltype_mask); 5224 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5225 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5226 (ltdefs->rx_iip6.ltype_match << 4) | 5227 ltdefs->rx_iip6.ltype_mask); 5228 } 5229 5230 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5231 int layer, int prof_idx) 5232 { 5233 struct nix_cn10k_aq_enq_req aq_req; 5234 int rc; 5235 5236 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5237 5238 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5239 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5240 aq_req.op = NIX_AQ_INSTOP_INIT; 5241 5242 /* Context is all zeros, submit to AQ */ 5243 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5244 (struct nix_aq_enq_req *)&aq_req, NULL); 5245 if (rc) 5246 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5247 layer, prof_idx); 5248 return rc; 5249 } 5250 5251 static int nix_setup_ipolicers(struct 
rvu *rvu, 5252 struct nix_hw *nix_hw, int blkaddr) 5253 { 5254 struct rvu_hwinfo *hw = rvu->hw; 5255 struct nix_ipolicer *ipolicer; 5256 int err, layer, prof_idx; 5257 u64 cfg; 5258 5259 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5260 if (!(cfg & BIT_ULL(61))) { 5261 hw->cap.ipolicer = false; 5262 return 0; 5263 } 5264 5265 hw->cap.ipolicer = true; 5266 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5267 sizeof(*ipolicer), GFP_KERNEL); 5268 if (!nix_hw->ipolicer) 5269 return -ENOMEM; 5270 5271 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5272 5273 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5274 ipolicer = &nix_hw->ipolicer[layer]; 5275 switch (layer) { 5276 case BAND_PROF_LEAF_LAYER: 5277 ipolicer->band_prof.max = cfg & 0XFFFF; 5278 break; 5279 case BAND_PROF_MID_LAYER: 5280 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5281 break; 5282 case BAND_PROF_TOP_LAYER: 5283 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5284 break; 5285 } 5286 5287 if (!ipolicer->band_prof.max) 5288 continue; 5289 5290 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5291 if (err) 5292 return err; 5293 5294 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5295 ipolicer->band_prof.max, 5296 sizeof(u16), GFP_KERNEL); 5297 if (!ipolicer->pfvf_map) 5298 return -ENOMEM; 5299 5300 ipolicer->match_id = devm_kcalloc(rvu->dev, 5301 ipolicer->band_prof.max, 5302 sizeof(u16), GFP_KERNEL); 5303 if (!ipolicer->match_id) 5304 return -ENOMEM; 5305 5306 for (prof_idx = 0; 5307 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5308 /* Set AF as current owner for INIT ops to succeed */ 5309 ipolicer->pfvf_map[prof_idx] = 0x00; 5310 5311 /* There is no enable bit in the profile context, 5312 * so no context disable. So let's INIT them here 5313 * so that PF/VF later on have to just do WRITE to 5314 * setup policer rates and config. 5315 */ 5316 err = nix_init_policer_context(rvu, nix_hw, 5317 layer, prof_idx); 5318 if (err) 5319 return err; 5320 } 5321 5322 /* Allocate memory for maintaining ref_counts for MID level 5323 * profiles, this will be needed for leaf layer profiles' 5324 * aggregation. 5325 */ 5326 if (layer != BAND_PROF_MID_LAYER) 5327 continue; 5328 5329 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5330 ipolicer->band_prof.max, 5331 sizeof(u16), GFP_KERNEL); 5332 if (!ipolicer->ref_count) 5333 return -ENOMEM; 5334 } 5335 5336 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5337 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5338 5339 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5340 5341 return 0; 5342 } 5343 5344 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5345 { 5346 struct nix_ipolicer *ipolicer; 5347 int layer; 5348 5349 if (!rvu->hw->cap.ipolicer) 5350 return; 5351 5352 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5353 ipolicer = &nix_hw->ipolicer[layer]; 5354 5355 if (!ipolicer->band_prof.max) 5356 continue; 5357 5358 kfree(ipolicer->band_prof.bmap); 5359 } 5360 } 5361 5362 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5363 struct nix_hw *nix_hw, u16 pcifunc) 5364 { 5365 struct nix_ipolicer *ipolicer; 5366 int layer, hi_layer, prof_idx; 5367 5368 /* Bits [15:14] in profile index represent layer */ 5369 layer = (req->qidx >> 14) & 0x03; 5370 prof_idx = req->qidx & 0x3FFF; 5371 5372 ipolicer = &nix_hw->ipolicer[layer]; 5373 if (prof_idx >= ipolicer->band_prof.max) 5374 return -EINVAL; 5375 5376 /* Check if the profile is allocated to the requesting PCIFUNC or not 5377 * with the exception of AF. 
AF is allowed to read and update contexts. 5378 */ 5379 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5380 return -EINVAL; 5381 5382 /* If this profile is linked to higher layer profile then check 5383 * if that profile is also allocated to the requesting PCIFUNC 5384 * or not. 5385 */ 5386 if (!req->prof.hl_en) 5387 return 0; 5388 5389 /* Leaf layer profile can link only to mid layer and 5390 * mid layer to top layer. 5391 */ 5392 if (layer == BAND_PROF_LEAF_LAYER) 5393 hi_layer = BAND_PROF_MID_LAYER; 5394 else if (layer == BAND_PROF_MID_LAYER) 5395 hi_layer = BAND_PROF_TOP_LAYER; 5396 else 5397 return -EINVAL; 5398 5399 ipolicer = &nix_hw->ipolicer[hi_layer]; 5400 prof_idx = req->prof.band_prof_id; 5401 if (prof_idx >= ipolicer->band_prof.max || 5402 ipolicer->pfvf_map[prof_idx] != pcifunc) 5403 return -EINVAL; 5404 5405 return 0; 5406 } 5407 5408 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 5409 struct nix_bandprof_alloc_req *req, 5410 struct nix_bandprof_alloc_rsp *rsp) 5411 { 5412 int blkaddr, layer, prof, idx, err; 5413 u16 pcifunc = req->hdr.pcifunc; 5414 struct nix_ipolicer *ipolicer; 5415 struct nix_hw *nix_hw; 5416 5417 if (!rvu->hw->cap.ipolicer) 5418 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5419 5420 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5421 if (err) 5422 return err; 5423 5424 mutex_lock(&rvu->rsrc_lock); 5425 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5426 if (layer == BAND_PROF_INVAL_LAYER) 5427 continue; 5428 if (!req->prof_count[layer]) 5429 continue; 5430 5431 ipolicer = &nix_hw->ipolicer[layer]; 5432 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5433 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 5434 if (idx == MAX_BANDPROF_PER_PFFUNC) 5435 break; 5436 5437 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5438 if (prof < 0) 5439 break; 5440 rsp->prof_count[layer]++; 5441 rsp->prof_idx[layer][idx] = prof; 5442 ipolicer->pfvf_map[prof] = pcifunc; 5443 } 5444 } 5445 mutex_unlock(&rvu->rsrc_lock); 5446 return 0; 5447 } 5448 5449 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 5450 { 5451 int blkaddr, layer, prof_idx, err; 5452 struct nix_ipolicer *ipolicer; 5453 struct nix_hw *nix_hw; 5454 5455 if (!rvu->hw->cap.ipolicer) 5456 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5457 5458 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5459 if (err) 5460 return err; 5461 5462 mutex_lock(&rvu->rsrc_lock); 5463 /* Free all the profiles allocated to the PCIFUNC */ 5464 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5465 if (layer == BAND_PROF_INVAL_LAYER) 5466 continue; 5467 ipolicer = &nix_hw->ipolicer[layer]; 5468 5469 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 5470 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 5471 continue; 5472 5473 /* Clear ratelimit aggregation, if any */ 5474 if (layer == BAND_PROF_LEAF_LAYER && 5475 ipolicer->match_id[prof_idx]) 5476 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5477 5478 ipolicer->pfvf_map[prof_idx] = 0x00; 5479 ipolicer->match_id[prof_idx] = 0; 5480 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5481 } 5482 } 5483 mutex_unlock(&rvu->rsrc_lock); 5484 return 0; 5485 } 5486 5487 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 5488 struct nix_bandprof_free_req *req, 5489 struct msg_rsp *rsp) 5490 { 5491 int blkaddr, layer, prof_idx, idx, err; 5492 u16 pcifunc = req->hdr.pcifunc; 5493 struct nix_ipolicer *ipolicer; 5494 struct nix_hw *nix_hw; 5495 5496 if (req->free_all) 5497 return nix_free_all_bandprof(rvu, 
pcifunc); 5498 5499 if (!rvu->hw->cap.ipolicer) 5500 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5501 5502 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5503 if (err) 5504 return err; 5505 5506 mutex_lock(&rvu->rsrc_lock); 5507 /* Free the requested profile indices */ 5508 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5509 if (layer == BAND_PROF_INVAL_LAYER) 5510 continue; 5511 if (!req->prof_count[layer]) 5512 continue; 5513 5514 ipolicer = &nix_hw->ipolicer[layer]; 5515 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5516 if (idx == MAX_BANDPROF_PER_PFFUNC) 5517 break; 5518 prof_idx = req->prof_idx[layer][idx]; 5519 if (prof_idx >= ipolicer->band_prof.max || 5520 ipolicer->pfvf_map[prof_idx] != pcifunc) 5521 continue; 5522 5523 /* Clear ratelimit aggregation, if any */ 5524 if (layer == BAND_PROF_LEAF_LAYER && 5525 ipolicer->match_id[prof_idx]) 5526 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5527 5528 ipolicer->pfvf_map[prof_idx] = 0x00; 5529 ipolicer->match_id[prof_idx] = 0; 5530 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5531 } 5532 } 5533 mutex_unlock(&rvu->rsrc_lock); 5534 return 0; 5535 } 5536 5537 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 5538 struct nix_cn10k_aq_enq_req *aq_req, 5539 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5540 u16 pcifunc, u8 ctype, u32 qidx) 5541 { 5542 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5543 aq_req->hdr.pcifunc = pcifunc; 5544 aq_req->ctype = ctype; 5545 aq_req->op = NIX_AQ_INSTOP_READ; 5546 aq_req->qidx = qidx; 5547 5548 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5549 (struct nix_aq_enq_req *)aq_req, 5550 (struct nix_aq_enq_rsp *)aq_rsp); 5551 } 5552 5553 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 5554 struct nix_hw *nix_hw, 5555 struct nix_cn10k_aq_enq_req *aq_req, 5556 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5557 u32 leaf_prof, u16 mid_prof) 5558 { 5559 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5560 aq_req->hdr.pcifunc = 0x00; 5561 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 5562 aq_req->op = NIX_AQ_INSTOP_WRITE; 5563 aq_req->qidx = leaf_prof; 5564 5565 aq_req->prof.band_prof_id = mid_prof; 5566 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 5567 aq_req->prof.hl_en = 1; 5568 aq_req->prof_mask.hl_en = 1; 5569 5570 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5571 (struct nix_aq_enq_req *)aq_req, 5572 (struct nix_aq_enq_rsp *)aq_rsp); 5573 } 5574 5575 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 5576 u16 rq_idx, u16 match_id) 5577 { 5578 int leaf_prof, mid_prof, leaf_match; 5579 struct nix_cn10k_aq_enq_req aq_req; 5580 struct nix_cn10k_aq_enq_rsp aq_rsp; 5581 struct nix_ipolicer *ipolicer; 5582 struct nix_hw *nix_hw; 5583 int blkaddr, idx, rc; 5584 5585 if (!rvu->hw->cap.ipolicer) 5586 return 0; 5587 5588 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5589 if (rc) 5590 return rc; 5591 5592 /* Fetch the RQ's context to see if policing is enabled */ 5593 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 5594 NIX_AQ_CTYPE_RQ, rq_idx); 5595 if (rc) { 5596 dev_err(rvu->dev, 5597 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 5598 __func__, rq_idx, pcifunc); 5599 return rc; 5600 } 5601 5602 if (!aq_rsp.rq.policer_ena) 5603 return 0; 5604 5605 /* Get the bandwidth profile ID mapped to this RQ */ 5606 leaf_prof = aq_rsp.rq.band_prof_id; 5607 5608 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 5609 ipolicer->match_id[leaf_prof] = match_id; 5610 5611 /* Check if any other leaf profile is marked with same match_id */ 5612 
for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 5613 if (idx == leaf_prof) 5614 continue; 5615 if (ipolicer->match_id[idx] != match_id) 5616 continue; 5617 5618 leaf_match = idx; 5619 break; 5620 } 5621 5622 if (idx == ipolicer->band_prof.max) 5623 return 0; 5624 5625 /* Fetch the matching profile's context to check if it's already 5626 * mapped to a mid level profile. 5627 */ 5628 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5629 NIX_AQ_CTYPE_BANDPROF, leaf_match); 5630 if (rc) { 5631 dev_err(rvu->dev, 5632 "%s: Failed to fetch context of leaf profile %d\n", 5633 __func__, leaf_match); 5634 return rc; 5635 } 5636 5637 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5638 if (aq_rsp.prof.hl_en) { 5639 /* Get the mid layer prof index and also map leaf_prof 5640 * to it, such that flows that are being steered 5641 * to different RQs and marked with the same match_id 5642 * are rate limited in an aggregate fashion 5643 */ 5644 mid_prof = aq_rsp.prof.band_prof_id; 5645 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5646 &aq_req, &aq_rsp, 5647 leaf_prof, mid_prof); 5648 if (rc) { 5649 dev_err(rvu->dev, 5650 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5651 __func__, leaf_prof, mid_prof); 5652 goto exit; 5653 } 5654 5655 mutex_lock(&rvu->rsrc_lock); 5656 ipolicer->ref_count[mid_prof]++; 5657 mutex_unlock(&rvu->rsrc_lock); 5658 goto exit; 5659 } 5660 5661 /* Allocate a mid layer profile and 5662 * map both 'leaf_prof' and 'leaf_match' profiles to it. 5663 */ 5664 mutex_lock(&rvu->rsrc_lock); 5665 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5666 if (mid_prof < 0) { 5667 dev_err(rvu->dev, 5668 "%s: Unable to allocate mid layer profile\n", __func__); 5669 mutex_unlock(&rvu->rsrc_lock); 5670 goto exit; 5671 } 5672 mutex_unlock(&rvu->rsrc_lock); 5673 ipolicer->pfvf_map[mid_prof] = 0x00; 5674 ipolicer->ref_count[mid_prof] = 0; 5675 5676 /* Initialize the mid layer profile the same as 'leaf_prof' */ 5677 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5678 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5679 if (rc) { 5680 dev_err(rvu->dev, 5681 "%s: Failed to fetch context of leaf profile %d\n", 5682 __func__, leaf_prof); 5683 goto exit; 5684 } 5685 5686 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5687 aq_req.hdr.pcifunc = 0x00; 5688 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 5689 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5690 aq_req.op = NIX_AQ_INSTOP_WRITE; 5691 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 5692 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); 5693 /* Clear higher layer enable bit in the mid profile, just in case */ 5694 aq_req.prof.hl_en = 0; 5695 aq_req.prof_mask.hl_en = 1; 5696 5697 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5698 (struct nix_aq_enq_req *)&aq_req, NULL); 5699 if (rc) { 5700 dev_err(rvu->dev, 5701 "%s: Failed to INIT context of mid layer profile %d\n", 5702 __func__, mid_prof); 5703 goto exit; 5704 } 5705 5706 /* Map both leaf profiles to this mid layer profile */ 5707 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5708 &aq_req, &aq_rsp, 5709 leaf_prof, mid_prof); 5710 if (rc) { 5711 dev_err(rvu->dev, 5712 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5713 __func__, leaf_prof, mid_prof); 5714 goto exit; 5715 } 5716 5717 mutex_lock(&rvu->rsrc_lock); 5718 ipolicer->ref_count[mid_prof]++; 5719 mutex_unlock(&rvu->rsrc_lock); 5720 5721 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5722 &aq_req, &aq_rsp, 5723 leaf_match, mid_prof); 5724 if (rc) { 5725
dev_err(rvu->dev, 5726 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5727 __func__, leaf_match, mid_prof); 5728 ipolicer->ref_count[mid_prof]--; 5729 goto exit; 5730 } 5731 5732 mutex_lock(&rvu->rsrc_lock); 5733 ipolicer->ref_count[mid_prof]++; 5734 mutex_unlock(&rvu->rsrc_lock); 5735 5736 exit: 5737 return rc; 5738 } 5739 5740 /* Called with mutex rsrc_lock */ 5741 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 5742 u32 leaf_prof) 5743 { 5744 struct nix_cn10k_aq_enq_req aq_req; 5745 struct nix_cn10k_aq_enq_rsp aq_rsp; 5746 struct nix_ipolicer *ipolicer; 5747 u16 mid_prof; 5748 int rc; 5749 5750 mutex_unlock(&rvu->rsrc_lock); 5751 5752 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5753 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5754 5755 mutex_lock(&rvu->rsrc_lock); 5756 if (rc) { 5757 dev_err(rvu->dev, 5758 "%s: Failed to fetch context of leaf profile %d\n", 5759 __func__, leaf_prof); 5760 return; 5761 } 5762 5763 if (!aq_rsp.prof.hl_en) 5764 return; 5765 5766 mid_prof = aq_rsp.prof.band_prof_id; 5767 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5768 ipolicer->ref_count[mid_prof]--; 5769 /* If ref_count is zero, free mid layer profile */ 5770 if (!ipolicer->ref_count[mid_prof]) { 5771 ipolicer->pfvf_map[mid_prof] = 0x00; 5772 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 5773 } 5774 } 5775 5776 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 5777 struct nix_bandprof_get_hwinfo_rsp *rsp) 5778 { 5779 struct nix_ipolicer *ipolicer; 5780 int blkaddr, layer, err; 5781 struct nix_hw *nix_hw; 5782 u64 tu; 5783 5784 if (!rvu->hw->cap.ipolicer) 5785 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5786 5787 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 5788 if (err) 5789 return err; 5790 5791 /* Return number of bandwidth profiles free at each layer */ 5792 mutex_lock(&rvu->rsrc_lock); 5793 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5794 if (layer == BAND_PROF_INVAL_LAYER) 5795 continue; 5796 5797 ipolicer = &nix_hw->ipolicer[layer]; 5798 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 5799 } 5800 mutex_unlock(&rvu->rsrc_lock); 5801 5802 /* Set the policer timeunit in nanosec */ 5803 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 5804 rsp->policer_timeunit = (tu + 1) * 100; 5805 5806 return 0; 5807 } 5808
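/* Illustrative sketch, not part of the upstream driver: NIX_AF_PL_TS holds
 * the policer timeunit as a count of 100ns steps minus one, which is why
 * nix_setup_ipolicers() writes 19 for a 2 usec timeunit and
 * rvu_mbox_handler_nix_bandprof_get_hwinfo() above reports (tu + 1) * 100
 * nanoseconds. The hypothetical helper below only restates that conversion
 * for clarity and is not used anywhere in this file.
 */
static inline u64 nix_example_policer_timeunit_ns(u64 pl_ts)
{
	/* Low 10 bits hold the programmed timeunit count */
	return ((pl_ts & GENMASK_ULL(9, 0)) + 1) * 100;
}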
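/* Illustrative sketch, not part of the upstream driver: bandwidth profile
 * AQ requests in this file encode both the policer layer and the profile
 * index in 'qidx' as (prof_idx & 0x3FFF) | (layer << 14), and
 * nix_verify_bandprof() unpacks bits [15:14] as the layer. The hypothetical
 * helpers below only restate that packing for clarity and are not used by
 * the driver.
 */
static inline u16 nix_example_bandprof_qidx(int layer, int prof_idx)
{
	/* Bits [13:0] carry the profile index, bits [15:14] the layer */
	return (prof_idx & 0x3FFF) | (layer << 14);
}

static inline int nix_example_bandprof_layer(u16 qidx)
{
	return (qidx >> 14) & 0x03;
}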