// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
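/* Note (summary added for clarity, not from the hardware manual): the
 * MC_TBL_SZ_* and MC_BUF_CNT_* values above appear to be the raw register
 * encodings for the multicast/MCE table size and replication buffer count,
 * so e.g. MC_TBL_SZ_512 selects a 512-entry table, which covers the
 * "256 HWVFs + 12 PFs" case noted below.
 */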
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node node;
	u16 pcifunc;
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first nix block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's same reg for RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}
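/* Worked example for the two DWRR MTU helpers below (illustrative, derived
 * from the comments in this file): convert_bytes_to_dwrr_mtu(1500) returns
 * ilog2(1500) = 10, and convert_dwrr_mtu_to_bytes(10) returns
 * BIT_ULL(10) = 1024, i.e. the round trip rounds down to a power of two.
 * The reserved encodings 4 and 5 map to the non power-of-two MTUs 9728 and
 * 10240 bytes.
 */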
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}

	return 0;
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}

	return 0;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
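/* Illustration of the LBK pairing configured above (assuming a single LBK
 * link, so lbkid stays 0): AF VF0 gets rx_chan = chan(lbk0, 0) and
 * tx_chan = chan(lbk0, 1), while AF VF1 gets rx_chan = chan(lbk0, 1) and
 * tx_chan = chan(lbk0, 0), i.e. consecutive VFs form a back-to-back
 * loopback pair.
 */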
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
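/* Worked example for the BPID assignment done by rvu_nix_get_bpid() below
 * (values are illustrative; lmac_chan_cnt comes from NIX_AF_CONST and
 * lmac_per_cgx from the silicon): with 16 channels per LMAC and 4 LMACs
 * per CGX, a request from cgx1/lmac2 with chan_base 0 maps to
 * bpid = (1 * 4 * 16) + (2 * 16) + 0 = 96, and bpid_per_chan then adds the
 * per-channel offset on top of that base.
 */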
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assign to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
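/* The LSO format helpers below program per-segment field rewrites. As a
 * summary (not a hardware specification): NIX_LSOALG_ADD_PAYLEN patches the
 * L3 length field with each segment's payload length, ADD_SEGNUM increments
 * the IPv4 ID per segment, ADD_OFFSET adds the running payload offset (used
 * for the TCP sequence number) and TCP_FLAGS applies the first/middle/last
 * segment flag masks programmed into NIX_AF_LSO_CFG by nix_setup_lso().
 */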
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
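/* Worked example for nixlf_rss_ctx_init() below (illustrative values,
 * assuming MAX_RSS_INDIR_TBL_SIZE is 256): rss_sz = 256 and rss_grps = 8
 * give num_indices = 2048, so the size field written into
 * NIX_AF_LFX_RSS_CFG is ilog2(2048 / 256) = 3; group 2 is then programmed
 * into NIX_AF_LFX_RSS_GRPX() with offset 2 * 256 = 512 and size encoding
 * ilog2(256) - 1 = 7.
 */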
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}
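/* Admin queue submission sequence used above, in short: copy the
 * instruction into the slot at the current head of the instruction ring,
 * clear the result memory, order the writes with wmb(), ring
 * NIX_AF_AQ_DOOR and busy-poll result->compcode (roughly 1000 x 1us)
 * before giving up with -EBUSY. Callers serialize on aq->lock, so only one
 * instruction is outstanding at a time.
 */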
static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			       ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			       ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			       ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err = 0;
	aq_req.cq_mask.wrptr = 0;
	aq_req.cq_mask.tail = 0;
	aq_req.cq_mask.head = 0;
	aq_req.cq_mask.avg_level = 0;
	aq_req.cq_mask.update_time = 0;
	aq_req.cq_mask.substream = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 * cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a workaround perform CQ context read after each AQ write. If AQ
	 * read shows AQ write is not updated perform AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
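/* Minimal usage sketch for the handler above, as seen from an RVU PF/VF
 * driver (field names are from nix_lf_alloc_req; the mbox plumbing and the
 * exact values are illustrative only, not a reference sequence):
 *
 *	struct nix_lf_alloc_req *req = <alloc NIX_LF_ALLOC mbox msg>;
 *
 *	req->rq_cnt = 16;
 *	req->sq_cnt = 16;
 *	req->cq_cnt = 32;
 *	req->rss_sz = 256;	// power of two, <= MAX_RSS_INDIR_TBL_SIZE
 *	req->rss_grps = 1;
 *	req->npa_func = RVU_DEFAULT_PF_FUNC;	// use own NPA LF
 *	req->sso_func = 0;	// no SSO binding
 *	req->xqe_sz = 0;	// CQE/WQE size selector
 *	req->rx_cfg = 0;	// RX length/csum/apad checks, driver specific
 *	<send msg and read nix_lf_alloc_rsp for channel and queue info>
 */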
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Handle shaper update specially for a few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}

static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
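/* Note on the shaper reset path above: on revisions where
 * nix_shaper_toggle_wait is set, the CIR/PIR enable bit cannot simply be
 * flipped; handle_txschq_shaper_update() brackets the write with SW_XOFF
 * assert/deassert and, on enable, polls MD_DEBUG0 for up to ~10ms until
 * VLD (bit 32) is set or C_CON (bit 48) clears before applying the new
 * rate register value.
 */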
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) {/* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
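/* Worked example for nix_get_txschq_range() above (capability numbers are
 * illustrative, they come from hw->cap at probe time): with
 * nix_txsch_per_cgx_lmac = 16, a CGX mapped PF on link 5 gets the range
 * [80, 96); an AF VF on an LBK link starts at the same per-link stride but
 * only spans nix_txsch_per_lbk_lmac queues, and the SDP range begins after
 * all CGX and LBK ranges.
 */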
req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || 1947 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) 1948 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1949 1950 /* If contiguous queues are needed, check for availability */ 1951 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1952 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1953 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1954 1955 return 0; 1956 } 1957 1958 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1959 struct nix_txsch_alloc_rsp *rsp, 1960 int lvl, int start, int end) 1961 { 1962 struct rvu_hwinfo *hw = rvu->hw; 1963 u16 pcifunc = rsp->hdr.pcifunc; 1964 int idx, schq; 1965 1966 /* For traffic aggregating levels, queue alloc is based 1967 * on the transmit link to which PF_FUNC is mapped. 1968 */ 1969 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1970 /* A single TL queue is allocated */ 1971 if (rsp->schq_contig[lvl]) { 1972 rsp->schq_contig[lvl] = 1; 1973 rsp->schq_contig_list[lvl][0] = start; 1974 } 1975 1976 /* Both contig and non-contig reqs don't make sense here */ 1977 if (rsp->schq_contig[lvl]) 1978 rsp->schq[lvl] = 0; 1979 1980 if (rsp->schq[lvl]) { 1981 rsp->schq[lvl] = 1; 1982 rsp->schq_list[lvl][0] = start; 1983 } 1984 return; 1985 } 1986 1987 /* Adjust the queue request count if HW supports 1988 * only one queue per level configuration. 1989 */ 1990 if (hw->cap.nix_fixed_txschq_mapping) { 1991 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1992 schq = start + idx; 1993 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1994 rsp->schq_contig[lvl] = 0; 1995 rsp->schq[lvl] = 0; 1996 return; 1997 } 1998 1999 if (rsp->schq_contig[lvl]) { 2000 rsp->schq_contig[lvl] = 1; 2001 set_bit(schq, txsch->schq.bmap); 2002 rsp->schq_contig_list[lvl][0] = schq; 2003 rsp->schq[lvl] = 0; 2004 } else if (rsp->schq[lvl]) { 2005 rsp->schq[lvl] = 1; 2006 set_bit(schq, txsch->schq.bmap); 2007 rsp->schq_list[lvl][0] = schq; 2008 } 2009 return; 2010 } 2011 2012 /* Allocate contiguous queue indices requested first */ 2013 if (rsp->schq_contig[lvl]) { 2014 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 2015 txsch->schq.max, start, 2016 rsp->schq_contig[lvl], 0); 2017 if (schq >= end) 2018 rsp->schq_contig[lvl] = 0; 2019 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 2020 set_bit(schq, txsch->schq.bmap); 2021 rsp->schq_contig_list[lvl][idx] = schq; 2022 schq++; 2023 } 2024 } 2025 2026 /* Allocate non-contiguous queue indices */ 2027 if (rsp->schq[lvl]) { 2028 idx = 0; 2029 for (schq = start; schq < end; schq++) { 2030 if (!test_bit(schq, txsch->schq.bmap)) { 2031 set_bit(schq, txsch->schq.bmap); 2032 rsp->schq_list[lvl][idx++] = schq; 2033 } 2034 if (idx == rsp->schq[lvl]) 2035 break; 2036 } 2037 /* Update how many were allocated */ 2038 rsp->schq[lvl] = idx; 2039 } 2040 } 2041 2042 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 2043 struct nix_txsch_alloc_req *req, 2044 struct nix_txsch_alloc_rsp *rsp) 2045 { 2046 struct rvu_hwinfo *hw = rvu->hw; 2047 u16 pcifunc = req->hdr.pcifunc; 2048 int link, blkaddr, rc = 0; 2049 int lvl, idx, start, end; 2050 struct nix_txsch *txsch; 2051 struct nix_hw *nix_hw; 2052 u32 *pfvf_map; 2053 int nixlf; 2054 u16 schq; 2055 2056 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2057 if (rc) 2058 return rc; 2059 2060 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2061 if (!nix_hw) 2062 return NIX_AF_ERR_INVALID_NIXBLK; 2063 2064 mutex_lock(&rvu->rsrc_lock); 2065 2066 /* Check if request is valid as per HW capabilities 2067 * and can be accommodated.
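 * Validation runs for every level before anything is allocated, so a
 * request that cannot be fully satisfied is rejected without partial
 * allocation. Illustrative request (hypothetical values, not from the
 * HW spec):
 *   req->schq[NIX_TXSCH_LVL_SMQ]        = 1;    one SMQ
 *   req->schq_contig[NIX_TXSCH_LVL_TL4] = 2;    two contiguous TL4s
 * For each level, the sum of schq[] and schq_contig[] is checked
 * against the free count, and each count against MAX_TXSCHQ_PER_FUNC.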
2068 */ 2069 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2070 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 2071 if (rc) 2072 goto err; 2073 } 2074 2075 /* Allocate requested Tx scheduler queues */ 2076 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2077 txsch = &nix_hw->txsch[lvl]; 2078 pfvf_map = txsch->pfvf_map; 2079 2080 if (!req->schq[lvl] && !req->schq_contig[lvl]) 2081 continue; 2082 2083 rsp->schq[lvl] = req->schq[lvl]; 2084 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 2085 2086 link = nix_get_tx_link(rvu, pcifunc); 2087 2088 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 2089 start = link; 2090 end = link; 2091 } else if (hw->cap.nix_fixed_txschq_mapping) { 2092 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 2093 } else { 2094 start = 0; 2095 end = txsch->schq.max; 2096 } 2097 2098 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 2099 2100 /* Reset queue config */ 2101 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 2102 schq = rsp->schq_contig_list[lvl][idx]; 2103 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2104 NIX_TXSCHQ_CFG_DONE)) 2105 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2106 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2107 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2108 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2109 } 2110 2111 for (idx = 0; idx < req->schq[lvl]; idx++) { 2112 schq = rsp->schq_list[lvl][idx]; 2113 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 2114 NIX_TXSCHQ_CFG_DONE)) 2115 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 2116 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2117 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2118 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2119 } 2120 } 2121 2122 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 2123 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 2124 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 2125 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
2126 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 2127 goto exit; 2128 err: 2129 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 2130 exit: 2131 mutex_unlock(&rvu->rsrc_lock); 2132 return rc; 2133 } 2134 2135 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, 2136 struct nix_smq_flush_ctx *smq_flush_ctx) 2137 { 2138 struct nix_smq_tree_ctx *smq_tree_ctx; 2139 u64 parent_off, regval; 2140 u16 schq; 2141 int lvl; 2142 2143 smq_flush_ctx->smq = smq; 2144 2145 schq = smq; 2146 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2147 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2148 if (lvl == NIX_TXSCH_LVL_TL1) { 2149 smq_flush_ctx->tl1_schq = schq; 2150 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); 2151 smq_tree_ctx->pir_off = 0; 2152 smq_tree_ctx->pir_val = 0; 2153 parent_off = 0; 2154 } else if (lvl == NIX_TXSCH_LVL_TL2) { 2155 smq_flush_ctx->tl2_schq = schq; 2156 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); 2157 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); 2158 parent_off = NIX_AF_TL2X_PARENT(schq); 2159 } else if (lvl == NIX_TXSCH_LVL_TL3) { 2160 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); 2161 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); 2162 parent_off = NIX_AF_TL3X_PARENT(schq); 2163 } else if (lvl == NIX_TXSCH_LVL_TL4) { 2164 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); 2165 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); 2166 parent_off = NIX_AF_TL4X_PARENT(schq); 2167 } else if (lvl == NIX_TXSCH_LVL_MDQ) { 2168 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); 2169 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); 2170 parent_off = NIX_AF_MDQX_PARENT(schq); 2171 } 2172 /* save cir/pir register values */ 2173 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); 2174 if (smq_tree_ctx->pir_off) 2175 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); 2176 2177 /* get parent txsch node */ 2178 if (parent_off) { 2179 regval = rvu_read64(rvu, blkaddr, parent_off); 2180 schq = (regval >> 16) & 0x1FF; 2181 } 2182 } 2183 } 2184 2185 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, 2186 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2187 { 2188 struct nix_txsch *txsch; 2189 struct nix_hw *nix_hw; 2190 u64 regoff; 2191 int tl2; 2192 2193 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2194 if (!nix_hw) 2195 return; 2196 2197 /* loop through all TL2s with matching PF_FUNC */ 2198 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; 2199 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { 2200 /* skip the smq(flush) TL2 */ 2201 if (tl2 == smq_flush_ctx->tl2_schq) 2202 continue; 2203 /* skip unused TL2s */ 2204 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) 2205 continue; 2206 /* skip if PF_FUNC doesn't match */ 2207 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != 2208 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & 2209 ~RVU_PFVF_FUNC_MASK))) 2210 continue; 2211 /* enable/disable XOFF */ 2212 regoff = NIX_AF_TL2X_SW_XOFF(tl2); 2213 if (enable) 2214 rvu_write64(rvu, blkaddr, regoff, 0x1); 2215 else 2216 rvu_write64(rvu, blkaddr, regoff, 0x0); 2217 } 2218 } 2219 2220 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, 2221 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) 2222 { 2223 u64 cir_off, pir_off, cir_val, pir_val; 2224 struct nix_smq_tree_ctx *smq_tree_ctx; 2225 int lvl; 2226 2227 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { 2228 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; 2229 cir_off = smq_tree_ctx->cir_off; 2230 cir_val = 
smq_tree_ctx->cir_val; 2231 pir_off = smq_tree_ctx->pir_off; 2232 pir_val = smq_tree_ctx->pir_val; 2233 2234 if (enable) { 2235 rvu_write64(rvu, blkaddr, cir_off, cir_val); 2236 if (lvl != NIX_TXSCH_LVL_TL1) 2237 rvu_write64(rvu, blkaddr, pir_off, pir_val); 2238 } else { 2239 rvu_write64(rvu, blkaddr, cir_off, 0x0); 2240 if (lvl != NIX_TXSCH_LVL_TL1) 2241 rvu_write64(rvu, blkaddr, pir_off, 0x0); 2242 } 2243 } 2244 } 2245 2246 static int nix_smq_flush(struct rvu *rvu, int blkaddr, 2247 int smq, u16 pcifunc, int nixlf) 2248 { 2249 struct nix_smq_flush_ctx *smq_flush_ctx; 2250 int pf = rvu_get_pf(pcifunc); 2251 u8 cgx_id = 0, lmac_id = 0; 2252 int err, restore_tx_en = 0; 2253 u64 cfg; 2254 2255 if (!is_rvu_otx2(rvu)) { 2256 /* Skip SMQ flush if pkt count is zero */ 2257 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); 2258 if (!cfg) 2259 return 0; 2260 } 2261 2262 /* enable cgx tx if disabled */ 2263 if (is_pf_cgxmapped(rvu, pf)) { 2264 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 2265 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 2266 lmac_id, true); 2267 } 2268 2269 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ 2270 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); 2271 if (!smq_flush_ctx) 2272 return -ENOMEM; 2273 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); 2274 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); 2275 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); 2276 2277 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 2278 /* Do SMQ flush and set enqueue xoff */ 2279 cfg |= BIT_ULL(50) | BIT_ULL(49); 2280 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 2281 2282 /* Disable backpressure from physical link, 2283 * otherwise SMQ flush may stall. 
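 * (Receive backpressure is re-enabled below once the flush poll
 * completes.)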
2284 */ 2285 rvu_cgx_enadis_rx_bp(rvu, pf, false); 2286 2287 /* Wait for flush to complete */ 2288 err = rvu_poll_reg(rvu, blkaddr, 2289 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 2290 if (err) 2291 dev_info(rvu->dev, 2292 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", 2293 nixlf, smq); 2294 2295 /* clear XOFF on TL2s */ 2296 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); 2297 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); 2298 kfree(smq_flush_ctx); 2299 2300 rvu_cgx_enadis_rx_bp(rvu, pf, true); 2301 /* restore cgx tx state */ 2302 if (restore_tx_en) 2303 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 2304 return err; 2305 } 2306 2307 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 2308 { 2309 int blkaddr, nixlf, lvl, schq, err; 2310 struct rvu_hwinfo *hw = rvu->hw; 2311 struct nix_txsch *txsch; 2312 struct nix_hw *nix_hw; 2313 u16 map_func; 2314 2315 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2316 if (blkaddr < 0) 2317 return NIX_AF_ERR_AF_LF_INVALID; 2318 2319 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2320 if (!nix_hw) 2321 return NIX_AF_ERR_INVALID_NIXBLK; 2322 2323 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2324 if (nixlf < 0) 2325 return NIX_AF_ERR_AF_LF_INVALID; 2326 2327 /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ 2328 mutex_lock(&rvu->rsrc_lock); 2329 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2330 txsch = &nix_hw->txsch[lvl]; 2331 2332 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2333 continue; 2334 2335 for (schq = 0; schq < txsch->schq.max; schq++) { 2336 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2337 continue; 2338 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2339 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2340 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2341 } 2342 } 2343 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, 2344 nix_get_tx_link(rvu, pcifunc)); 2345 2346 /* On PF cleanup, clear cfg done flag as 2347 * PF would have changed default config. 2348 */ 2349 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { 2350 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; 2351 schq = nix_get_tx_link(rvu, pcifunc); 2352 /* Do not clear pcifunc in txsch->pfvf_map[schq] because 2353 * VF might be using this TL1 queue 2354 */ 2355 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 2356 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); 2357 } 2358 2359 /* Flush SMQs */ 2360 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2361 for (schq = 0; schq < txsch->schq.max; schq++) { 2362 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2363 continue; 2364 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2365 } 2366 2367 /* Now free scheduler queues to free pool */ 2368 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2369 /* TLs above aggregation level are shared across all PF 2370 * and it's VFs, hence skip freeing them. 
2371 */ 2372 if (lvl >= hw->cap.nix_tx_aggr_lvl) 2373 continue; 2374 2375 txsch = &nix_hw->txsch[lvl]; 2376 for (schq = 0; schq < txsch->schq.max; schq++) { 2377 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2378 continue; 2379 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2380 rvu_free_rsrc(&txsch->schq, schq); 2381 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2382 } 2383 } 2384 mutex_unlock(&rvu->rsrc_lock); 2385 2386 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 2387 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 2388 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 2389 if (err) 2390 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 2391 2392 return 0; 2393 } 2394 2395 static int nix_txschq_free_one(struct rvu *rvu, 2396 struct nix_txsch_free_req *req) 2397 { 2398 struct rvu_hwinfo *hw = rvu->hw; 2399 u16 pcifunc = req->hdr.pcifunc; 2400 int lvl, schq, nixlf, blkaddr; 2401 struct nix_txsch *txsch; 2402 struct nix_hw *nix_hw; 2403 u32 *pfvf_map; 2404 int rc; 2405 2406 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2407 if (blkaddr < 0) 2408 return NIX_AF_ERR_AF_LF_INVALID; 2409 2410 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2411 if (!nix_hw) 2412 return NIX_AF_ERR_INVALID_NIXBLK; 2413 2414 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2415 if (nixlf < 0) 2416 return NIX_AF_ERR_AF_LF_INVALID; 2417 2418 lvl = req->schq_lvl; 2419 schq = req->schq; 2420 txsch = &nix_hw->txsch[lvl]; 2421 2422 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 2423 return 0; 2424 2425 pfvf_map = txsch->pfvf_map; 2426 mutex_lock(&rvu->rsrc_lock); 2427 2428 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 2429 rc = NIX_AF_ERR_TLX_INVALID; 2430 goto err; 2431 } 2432 2433 /* Clear SW_XOFF of this resource only. 2434 * For SMQ level, all path XOFF's 2435 * need to be made clear by user 2436 */ 2437 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); 2438 2439 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 2440 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); 2441 2442 /* Flush if it is a SMQ. 
Onus of disabling 2443 * TL2/3 queue links before SMQ flush is on user 2444 */ 2445 if (lvl == NIX_TXSCH_LVL_SMQ && 2446 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { 2447 rc = NIX_AF_SMQ_FLUSH_FAILED; 2448 goto err; 2449 } 2450 2451 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); 2452 2453 /* Free the resource */ 2454 rvu_free_rsrc(&txsch->schq, schq); 2455 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2456 mutex_unlock(&rvu->rsrc_lock); 2457 return 0; 2458 err: 2459 mutex_unlock(&rvu->rsrc_lock); 2460 return rc; 2461 } 2462 2463 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 2464 struct nix_txsch_free_req *req, 2465 struct msg_rsp *rsp) 2466 { 2467 if (req->flags & TXSCHQ_FREE_ALL) 2468 return nix_txschq_free(rvu, req->hdr.pcifunc); 2469 else 2470 return nix_txschq_free_one(rvu, req); 2471 } 2472 2473 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 2474 int lvl, u64 reg, u64 regval) 2475 { 2476 u64 regbase = reg & 0xFFFF; 2477 u16 schq, parent; 2478 2479 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 2480 return false; 2481 2482 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2483 /* Check if this schq belongs to this PF/VF or not */ 2484 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 2485 return false; 2486 2487 parent = (regval >> 16) & 0x1FF; 2488 /* Validate MDQ's TL4 parent */ 2489 if (regbase == NIX_AF_MDQX_PARENT(0) && 2490 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 2491 return false; 2492 2493 /* Validate TL4's TL3 parent */ 2494 if (regbase == NIX_AF_TL4X_PARENT(0) && 2495 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 2496 return false; 2497 2498 /* Validate TL3's TL2 parent */ 2499 if (regbase == NIX_AF_TL3X_PARENT(0) && 2500 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 2501 return false; 2502 2503 /* Validate TL2's TL1 parent */ 2504 if (regbase == NIX_AF_TL2X_PARENT(0) && 2505 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 2506 return false; 2507 2508 return true; 2509 } 2510 2511 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 2512 { 2513 u64 regbase; 2514 2515 if (hw->cap.nix_shaping) 2516 return true; 2517 2518 /* If shaping and coloring is not supported, then 2519 * *_CIR and *_PIR registers should not be configured. 
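 * When this check fails, the caller skips the offending register write
 * (see the 'continue' in rvu_mbox_handler_nix_txschq_cfg()) rather than
 * failing the whole mailbox request.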
2520 */ 2521 regbase = reg & 0xFFFF; 2522 2523 switch (lvl) { 2524 case NIX_TXSCH_LVL_TL1: 2525 if (regbase == NIX_AF_TL1X_CIR(0)) 2526 return false; 2527 break; 2528 case NIX_TXSCH_LVL_TL2: 2529 if (regbase == NIX_AF_TL2X_CIR(0) || 2530 regbase == NIX_AF_TL2X_PIR(0)) 2531 return false; 2532 break; 2533 case NIX_TXSCH_LVL_TL3: 2534 if (regbase == NIX_AF_TL3X_CIR(0) || 2535 regbase == NIX_AF_TL3X_PIR(0)) 2536 return false; 2537 break; 2538 case NIX_TXSCH_LVL_TL4: 2539 if (regbase == NIX_AF_TL4X_CIR(0) || 2540 regbase == NIX_AF_TL4X_PIR(0)) 2541 return false; 2542 break; 2543 case NIX_TXSCH_LVL_MDQ: 2544 if (regbase == NIX_AF_MDQX_CIR(0) || 2545 regbase == NIX_AF_MDQX_PIR(0)) 2546 return false; 2547 break; 2548 } 2549 return true; 2550 } 2551 2552 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 2553 u16 pcifunc, int blkaddr) 2554 { 2555 u32 *pfvf_map; 2556 int schq; 2557 2558 schq = nix_get_tx_link(rvu, pcifunc); 2559 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 2560 /* Skip if PF has already done the config */ 2561 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 2562 return; 2563 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 2564 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 2565 2566 /* On OcteonTx2 the config was in bytes; on newer silicons 2567 * it's changed to weight. 2568 */ 2569 if (!rvu->hw->cap.nix_common_dwrr_mtu) 2570 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2571 TXSCH_TL1_DFLT_RR_QTM); 2572 else 2573 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 2574 CN10K_MAX_DWRR_WEIGHT); 2575 2576 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 2577 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 2578 } 2579 2580 /* Register offset - [15:0] 2581 * Scheduler Queue number - [25:16] 2582 */ 2583 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) 2584 2585 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, 2586 int blkaddr, struct nix_txschq_config *req, 2587 struct nix_txschq_config *rsp) 2588 { 2589 u16 pcifunc = req->hdr.pcifunc; 2590 int idx, schq; 2591 u64 reg; 2592 2593 for (idx = 0; idx < req->num_regs; idx++) { 2594 reg = req->reg[idx]; 2595 reg &= NIX_TX_SCHQ_MASK; 2596 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2597 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || 2598 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) 2599 return NIX_AF_INVAL_TXSCHQ_CFG; 2600 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); 2601 } 2602 rsp->lvl = req->lvl; 2603 rsp->num_regs = req->num_regs; 2604 return 0; 2605 } 2606 2607 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, 2608 struct nix_txsch *txsch, bool enable) 2609 { 2610 struct rvu_hwinfo *hw = rvu->hw; 2611 int lbk_link_start, lbk_links; 2612 u8 pf = rvu_get_pf(pcifunc); 2613 int schq; 2614 u64 cfg; 2615 2616 if (!is_pf_cgxmapped(rvu, pf)) 2617 return; 2618 2619 cfg = enable ?
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; 2620 lbk_link_start = hw->cgx_links; 2621 2622 for (schq = 0; schq < txsch->schq.max; schq++) { 2623 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2624 continue; 2625 /* Enable all LBK links with channel 63 by default so that 2626 * packets can be sent to LBK with a NPC TX MCAM rule 2627 */ 2628 lbk_links = hw->lbk_links; 2629 while (lbk_links--) 2630 rvu_write64(rvu, blkaddr, 2631 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 2632 lbk_link_start + 2633 lbk_links), cfg); 2634 } 2635 } 2636 2637 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 2638 struct nix_txschq_config *req, 2639 struct nix_txschq_config *rsp) 2640 { 2641 u64 reg, val, regval, schq_regbase, val_mask; 2642 struct rvu_hwinfo *hw = rvu->hw; 2643 u16 pcifunc = req->hdr.pcifunc; 2644 struct nix_txsch *txsch; 2645 struct nix_hw *nix_hw; 2646 int blkaddr, idx, err; 2647 int nixlf, schq; 2648 u32 *pfvf_map; 2649 2650 if (req->lvl >= NIX_TXSCH_LVL_CNT || 2651 req->num_regs > MAX_REGS_PER_MBOX_MSG) 2652 return NIX_AF_INVAL_TXSCHQ_CFG; 2653 2654 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2655 if (err) 2656 return err; 2657 2658 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2659 if (!nix_hw) 2660 return NIX_AF_ERR_INVALID_NIXBLK; 2661 2662 if (req->read) 2663 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); 2664 2665 txsch = &nix_hw->txsch[req->lvl]; 2666 pfvf_map = txsch->pfvf_map; 2667 2668 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2669 pcifunc & RVU_PFVF_FUNC_MASK) { 2670 mutex_lock(&rvu->rsrc_lock); 2671 if (req->lvl == NIX_TXSCH_LVL_TL1) 2672 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2673 mutex_unlock(&rvu->rsrc_lock); 2674 return 0; 2675 } 2676 2677 for (idx = 0; idx < req->num_regs; idx++) { 2678 reg = req->reg[idx]; 2679 reg &= NIX_TX_SCHQ_MASK; 2680 regval = req->regval[idx]; 2681 schq_regbase = reg & 0xFFFF; 2682 val_mask = req->regval_mask[idx]; 2683 2684 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2685 txsch->lvl, reg, regval)) 2686 return NIX_AF_INVAL_TXSCHQ_CFG; 2687 2688 /* Check if shaping and coloring is supported */ 2689 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2690 continue; 2691 2692 val = rvu_read64(rvu, blkaddr, reg); 2693 regval = (val & val_mask) | (regval & ~val_mask); 2694 2695 /* Handle shaping state toggle specially */ 2696 if (hw->cap.nix_shaper_toggle_wait && 2697 handle_txschq_shaper_update(rvu, blkaddr, nixlf, 2698 req->lvl, reg, regval)) 2699 continue; 2700 2701 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2702 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2703 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2704 pcifunc, 0); 2705 regval &= ~(0x7FULL << 24); 2706 regval |= ((u64)nixlf << 24); 2707 } 2708 2709 /* Clear 'BP_ENA' config, if it's not allowed */ 2710 if (!hw->cap.nix_tx_link_bp) { 2711 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2712 (schq_regbase & 0xFF00) == 2713 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2714 regval &= ~BIT_ULL(13); 2715 } 2716 2717 /* Mark config as done for TL1 by PF */ 2718 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2719 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2720 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2721 mutex_lock(&rvu->rsrc_lock); 2722 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2723 NIX_TXSCHQ_CFG_DONE); 2724 mutex_unlock(&rvu->rsrc_lock); 2725 } 2726 2727 /* SMQ flush is special hence split register writes such 2728 * that flush first and write rest of the bits later. 
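 * In the block below, bit 49 triggers nix_smq_flush() and is then
 * cleared from regval so the final rvu_write64() does not retrigger
 * the flush.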
2729 */ 2730 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2731 (regval & BIT_ULL(49))) { 2732 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2733 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2734 regval &= ~BIT_ULL(49); 2735 } 2736 rvu_write64(rvu, blkaddr, reg, regval); 2737 } 2738 2739 return 0; 2740 } 2741 2742 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2743 struct nix_vtag_config *req) 2744 { 2745 u64 regval = req->vtag_size; 2746 2747 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2748 req->vtag_size > VTAGSIZE_T8) 2749 return -EINVAL; 2750 2751 /* RX VTAG Type 7 reserved for vf vlan */ 2752 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2753 return NIX_AF_ERR_RX_VTAG_INUSE; 2754 2755 if (req->rx.capture_vtag) 2756 regval |= BIT_ULL(5); 2757 if (req->rx.strip_vtag) 2758 regval |= BIT_ULL(4); 2759 2760 rvu_write64(rvu, blkaddr, 2761 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2762 return 0; 2763 } 2764 2765 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2766 u16 pcifunc, int index) 2767 { 2768 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2769 struct nix_txvlan *vlan; 2770 2771 if (!nix_hw) 2772 return NIX_AF_ERR_INVALID_NIXBLK; 2773 2774 vlan = &nix_hw->txvlan; 2775 if (vlan->entry2pfvf_map[index] != pcifunc) 2776 return NIX_AF_ERR_PARAM; 2777 2778 rvu_write64(rvu, blkaddr, 2779 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2780 rvu_write64(rvu, blkaddr, 2781 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2782 2783 vlan->entry2pfvf_map[index] = 0; 2784 rvu_free_rsrc(&vlan->rsrc, index); 2785 2786 return 0; 2787 } 2788 2789 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2790 { 2791 struct nix_txvlan *vlan; 2792 struct nix_hw *nix_hw; 2793 int index, blkaddr; 2794 2795 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2796 if (blkaddr < 0) 2797 return; 2798 2799 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2800 if (!nix_hw) 2801 return; 2802 2803 vlan = &nix_hw->txvlan; 2804 2805 mutex_lock(&vlan->rsrc_lock); 2806 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2807 for (index = 0; index < vlan->rsrc.max; index++) { 2808 if (vlan->entry2pfvf_map[index] == pcifunc) 2809 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2810 } 2811 mutex_unlock(&vlan->rsrc_lock); 2812 } 2813 2814 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2815 u64 vtag, u8 size) 2816 { 2817 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2818 struct nix_txvlan *vlan; 2819 u64 regval; 2820 int index; 2821 2822 if (!nix_hw) 2823 return NIX_AF_ERR_INVALID_NIXBLK; 2824 2825 vlan = &nix_hw->txvlan; 2826 2827 mutex_lock(&vlan->rsrc_lock); 2828 2829 index = rvu_alloc_rsrc(&vlan->rsrc); 2830 if (index < 0) { 2831 mutex_unlock(&vlan->rsrc_lock); 2832 return index; 2833 } 2834 2835 mutex_unlock(&vlan->rsrc_lock); 2836 2837 regval = size ? 
vtag : vtag << 32; 2838 2839 rvu_write64(rvu, blkaddr, 2840 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2841 rvu_write64(rvu, blkaddr, 2842 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2843 2844 return index; 2845 } 2846 2847 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2848 struct nix_vtag_config *req) 2849 { 2850 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2851 u16 pcifunc = req->hdr.pcifunc; 2852 int idx0 = req->tx.vtag0_idx; 2853 int idx1 = req->tx.vtag1_idx; 2854 struct nix_txvlan *vlan; 2855 int err = 0; 2856 2857 if (!nix_hw) 2858 return NIX_AF_ERR_INVALID_NIXBLK; 2859 2860 vlan = &nix_hw->txvlan; 2861 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2862 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2863 vlan->entry2pfvf_map[idx1] != pcifunc) 2864 return NIX_AF_ERR_PARAM; 2865 2866 mutex_lock(&vlan->rsrc_lock); 2867 2868 if (req->tx.free_vtag0) { 2869 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2870 if (err) 2871 goto exit; 2872 } 2873 2874 if (req->tx.free_vtag1) 2875 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2876 2877 exit: 2878 mutex_unlock(&vlan->rsrc_lock); 2879 return err; 2880 } 2881 2882 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2883 struct nix_vtag_config *req, 2884 struct nix_vtag_config_rsp *rsp) 2885 { 2886 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2887 struct nix_txvlan *vlan; 2888 u16 pcifunc = req->hdr.pcifunc; 2889 2890 if (!nix_hw) 2891 return NIX_AF_ERR_INVALID_NIXBLK; 2892 2893 vlan = &nix_hw->txvlan; 2894 if (req->tx.cfg_vtag0) { 2895 rsp->vtag0_idx = 2896 nix_tx_vtag_alloc(rvu, blkaddr, 2897 req->tx.vtag0, req->vtag_size); 2898 2899 if (rsp->vtag0_idx < 0) 2900 return NIX_AF_ERR_TX_VTAG_NOSPC; 2901 2902 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2903 } 2904 2905 if (req->tx.cfg_vtag1) { 2906 rsp->vtag1_idx = 2907 nix_tx_vtag_alloc(rvu, blkaddr, 2908 req->tx.vtag1, req->vtag_size); 2909 2910 if (rsp->vtag1_idx < 0) 2911 goto err_free; 2912 2913 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2914 } 2915 2916 return 0; 2917 2918 err_free: 2919 if (req->tx.cfg_vtag0) 2920 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2921 2922 return NIX_AF_ERR_TX_VTAG_NOSPC; 2923 } 2924 2925 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2926 struct nix_vtag_config *req, 2927 struct nix_vtag_config_rsp *rsp) 2928 { 2929 u16 pcifunc = req->hdr.pcifunc; 2930 int blkaddr, nixlf, err; 2931 2932 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2933 if (err) 2934 return err; 2935 2936 if (req->cfg_type) { 2937 /* rx vtag configuration */ 2938 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2939 if (err) 2940 return NIX_AF_ERR_PARAM; 2941 } else { 2942 /* tx vtag configuration */ 2943 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2944 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2945 return NIX_AF_ERR_PARAM; 2946 2947 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2948 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2949 2950 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2951 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2952 } 2953 2954 return 0; 2955 } 2956 2957 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2958 int mce, u8 op, u16 pcifunc, int next, bool eol) 2959 { 2960 struct nix_aq_enq_req aq_req; 2961 int err; 2962 2963 aq_req.hdr.pcifunc = 0; 2964 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2965 aq_req.op = op; 2966 aq_req.qidx = mce; 2967 2968 /* Use RSS with RSS index 0 */ 2969 aq_req.mce.op = 1; 2970 aq_req.mce.index = 0; 2971 aq_req.mce.eol = eol; 2972 aq_req.mce.pf_func = pcifunc; 
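	/* 'next' chains this entry to another MCE index and 'eol' terminates
	 * the replication list. A two-entry list might look like this
	 * (illustrative indices only):
	 *   MCE[idx]:     pf_func = PF,  next = idx + 1, eol = 0
	 *   MCE[idx + 1]: pf_func = VF0, eol = 1 (list ends here)
	 * nix_update_mce_list() rewrites chains in this walk order and sets
	 * eol only on the last entry.
	 */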
2973 aq_req.mce.next = next; 2974 2975 /* All fields valid */ 2976 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2977 2978 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2979 if (err) { 2980 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2981 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2982 return err; 2983 } 2984 return 0; 2985 } 2986 2987 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2988 u16 pcifunc, bool add) 2989 { 2990 struct mce *mce, *tail = NULL; 2991 bool delete = false; 2992 2993 /* Scan through the current list */ 2994 hlist_for_each_entry(mce, &mce_list->head, node) { 2995 /* If already exists, then delete */ 2996 if (mce->pcifunc == pcifunc && !add) { 2997 delete = true; 2998 break; 2999 } else if (mce->pcifunc == pcifunc && add) { 3000 /* entry already exists */ 3001 return 0; 3002 } 3003 tail = mce; 3004 } 3005 3006 if (delete) { 3007 hlist_del(&mce->node); 3008 kfree(mce); 3009 mce_list->count--; 3010 return 0; 3011 } 3012 3013 if (!add) 3014 return 0; 3015 3016 /* Add a new one to the list, at the tail */ 3017 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 3018 if (!mce) 3019 return -ENOMEM; 3020 mce->pcifunc = pcifunc; 3021 if (!tail) 3022 hlist_add_head(&mce->node, &mce_list->head); 3023 else 3024 hlist_add_behind(&mce->node, &tail->node); 3025 mce_list->count++; 3026 return 0; 3027 } 3028 3029 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 3030 struct nix_mce_list *mce_list, 3031 int mce_idx, int mcam_index, bool add) 3032 { 3033 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 3034 struct npc_mcam *mcam = &rvu->hw->mcam; 3035 struct nix_mcast *mcast; 3036 struct nix_hw *nix_hw; 3037 struct mce *mce; 3038 3039 if (!mce_list) 3040 return -EINVAL; 3041 3042 /* Get this PF/VF func's MCE index */ 3043 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 3044 3045 if (idx > (mce_idx + mce_list->max)) { 3046 dev_err(rvu->dev, 3047 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 3048 __func__, idx, mce_list->max, 3049 pcifunc >> RVU_PFVF_PF_SHIFT); 3050 return -EINVAL; 3051 } 3052 3053 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 3054 if (err) 3055 return err; 3056 3057 mcast = &nix_hw->mcast; 3058 mutex_lock(&mcast->mce_lock); 3059 3060 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 3061 if (err) 3062 goto end; 3063 3064 /* Disable MCAM entry in NPC */ 3065 if (!mce_list->count) { 3066 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 3067 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 3068 goto end; 3069 } 3070 3071 /* Dump the updated list to HW */ 3072 idx = mce_idx; 3073 last_idx = idx + mce_list->count - 1; 3074 hlist_for_each_entry(mce, &mce_list->head, node) { 3075 if (idx > last_idx) 3076 break; 3077 3078 next_idx = idx + 1; 3079 /* EOL should be set in last MCE */ 3080 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 3081 mce->pcifunc, next_idx, 3082 (next_idx > last_idx) ? 
true : false); 3083 if (err) 3084 goto end; 3085 idx++; 3086 } 3087 3088 end: 3089 mutex_unlock(&mcast->mce_lock); 3090 return err; 3091 } 3092 3093 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 3094 struct nix_mce_list **mce_list, int *mce_idx) 3095 { 3096 struct rvu_hwinfo *hw = rvu->hw; 3097 struct rvu_pfvf *pfvf; 3098 3099 if (!hw->cap.nix_rx_multicast || 3100 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 3101 *mce_list = NULL; 3102 *mce_idx = 0; 3103 return; 3104 } 3105 3106 /* Get this PF/VF func's MCE index */ 3107 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 3108 3109 if (type == NIXLF_BCAST_ENTRY) { 3110 *mce_list = &pfvf->bcast_mce_list; 3111 *mce_idx = pfvf->bcast_mce_idx; 3112 } else if (type == NIXLF_ALLMULTI_ENTRY) { 3113 *mce_list = &pfvf->mcast_mce_list; 3114 *mce_idx = pfvf->mcast_mce_idx; 3115 } else if (type == NIXLF_PROMISC_ENTRY) { 3116 *mce_list = &pfvf->promisc_mce_list; 3117 *mce_idx = pfvf->promisc_mce_idx; 3118 } else { 3119 *mce_list = NULL; 3120 *mce_idx = 0; 3121 } 3122 } 3123 3124 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 3125 int type, bool add) 3126 { 3127 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 3128 struct npc_mcam *mcam = &rvu->hw->mcam; 3129 struct rvu_hwinfo *hw = rvu->hw; 3130 struct nix_mce_list *mce_list; 3131 int pf; 3132 3133 /* skip multicast pkt replication for AF's VFs & SDP links */ 3134 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 3135 return 0; 3136 3137 if (!hw->cap.nix_rx_multicast) 3138 return 0; 3139 3140 pf = rvu_get_pf(pcifunc); 3141 if (!is_pf_cgxmapped(rvu, pf)) 3142 return 0; 3143 3144 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3145 if (blkaddr < 0) 3146 return -EINVAL; 3147 3148 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 3149 if (nixlf < 0) 3150 return -EINVAL; 3151 3152 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 3153 3154 mcam_index = npc_get_nixlf_mcam_index(mcam, 3155 pcifunc & ~RVU_PFVF_FUNC_MASK, 3156 nixlf, type); 3157 err = nix_update_mce_list(rvu, pcifunc, mce_list, 3158 mce_idx, mcam_index, add); 3159 return err; 3160 } 3161 3162 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 3163 { 3164 struct nix_mcast *mcast = &nix_hw->mcast; 3165 int err, pf, numvfs, idx; 3166 struct rvu_pfvf *pfvf; 3167 u16 pcifunc; 3168 u64 cfg; 3169 3170 /* Skip PF0 (i.e AF) */ 3171 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 3172 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 3173 /* If PF is not enabled, nothing to do */ 3174 if (!((cfg >> 20) & 0x01)) 3175 continue; 3176 /* Get numVFs attached to this PF */ 3177 numvfs = (cfg >> 12) & 0xFF; 3178 3179 pfvf = &rvu->pf[pf]; 3180 3181 /* This NIX0/1 block mapped to PF ? 
*/ 3182 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 3183 continue; 3184 3185 /* save start idx of broadcast mce list */ 3186 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3187 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 3188 3189 /* save start idx of multicast mce list */ 3190 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3191 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 3192 3193 /* save the start idx of promisc mce list */ 3194 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 3195 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 3196 3197 for (idx = 0; idx < (numvfs + 1); idx++) { 3198 /* idx-0 is for PF, followed by VFs */ 3199 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 3200 pcifunc |= idx; 3201 /* Add dummy entries now, so that we don't have to check 3202 * for whether AQ_OP should be INIT/WRITE later on. 3203 * Will be updated when a NIXLF is attached/detached to 3204 * these PF/VFs. 3205 */ 3206 err = nix_blk_setup_mce(rvu, nix_hw, 3207 pfvf->bcast_mce_idx + idx, 3208 NIX_AQ_INSTOP_INIT, 3209 pcifunc, 0, true); 3210 if (err) 3211 return err; 3212 3213 /* add dummy entries to multicast mce list */ 3214 err = nix_blk_setup_mce(rvu, nix_hw, 3215 pfvf->mcast_mce_idx + idx, 3216 NIX_AQ_INSTOP_INIT, 3217 pcifunc, 0, true); 3218 if (err) 3219 return err; 3220 3221 /* add dummy entries to promisc mce list */ 3222 err = nix_blk_setup_mce(rvu, nix_hw, 3223 pfvf->promisc_mce_idx + idx, 3224 NIX_AQ_INSTOP_INIT, 3225 pcifunc, 0, true); 3226 if (err) 3227 return err; 3228 } 3229 } 3230 return 0; 3231 } 3232 3233 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3234 { 3235 struct nix_mcast *mcast = &nix_hw->mcast; 3236 struct rvu_hwinfo *hw = rvu->hw; 3237 int err, size; 3238 3239 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 3240 size = (1ULL << size); 3241 3242 /* Alloc memory for multicast/mirror replication entries */ 3243 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 3244 (256UL << MC_TBL_SIZE), size); 3245 if (err) 3246 return -ENOMEM; 3247 3248 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 3249 (u64)mcast->mce_ctx->iova); 3250 3251 /* Set max list length equal to max no of VFs per PF + PF itself */ 3252 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 3253 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 3254 3255 /* Alloc memory for multicast replication buffers */ 3256 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 3257 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 3258 (8UL << MC_BUF_CNT), size); 3259 if (err) 3260 return -ENOMEM; 3261 3262 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 3263 (u64)mcast->mcast_buf->iova); 3264 3265 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 3266 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 3267 3268 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 3269 BIT_ULL(63) | (mcast->replay_pkind << 24) | 3270 BIT_ULL(20) | MC_BUF_CNT); 3271 3272 mutex_init(&mcast->mce_lock); 3273 3274 return nix_setup_mce_tables(rvu, nix_hw); 3275 } 3276 3277 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 3278 { 3279 struct nix_txvlan *vlan = &nix_hw->txvlan; 3280 int err; 3281 3282 /* Allocate resource bimap for tx vtag def registers*/ 3283 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 3284 err = rvu_alloc_bitmap(&vlan->rsrc); 3285 if (err) 3286 return -ENOMEM; 3287 3288 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 3289 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, 
vlan->rsrc.max, 3290 sizeof(u16), GFP_KERNEL); 3291 if (!vlan->entry2pfvf_map) 3292 goto free_mem; 3293 3294 mutex_init(&vlan->rsrc_lock); 3295 return 0; 3296 3297 free_mem: 3298 kfree(vlan->rsrc.bmap); 3299 return -ENOMEM; 3300 } 3301 3302 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 3303 { 3304 struct nix_txsch *txsch; 3305 int err, lvl, schq; 3306 u64 cfg, reg; 3307 3308 /* Get scheduler queue count of each type and alloc 3309 * bitmap for each for alloc/free/attach operations. 3310 */ 3311 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3312 txsch = &nix_hw->txsch[lvl]; 3313 txsch->lvl = lvl; 3314 switch (lvl) { 3315 case NIX_TXSCH_LVL_SMQ: 3316 reg = NIX_AF_MDQ_CONST; 3317 break; 3318 case NIX_TXSCH_LVL_TL4: 3319 reg = NIX_AF_TL4_CONST; 3320 break; 3321 case NIX_TXSCH_LVL_TL3: 3322 reg = NIX_AF_TL3_CONST; 3323 break; 3324 case NIX_TXSCH_LVL_TL2: 3325 reg = NIX_AF_TL2_CONST; 3326 break; 3327 case NIX_TXSCH_LVL_TL1: 3328 reg = NIX_AF_TL1_CONST; 3329 break; 3330 } 3331 cfg = rvu_read64(rvu, blkaddr, reg); 3332 txsch->schq.max = cfg & 0xFFFF; 3333 err = rvu_alloc_bitmap(&txsch->schq); 3334 if (err) 3335 return err; 3336 3337 /* Allocate memory for scheduler queues to 3338 * PF/VF pcifunc mapping info. 3339 */ 3340 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 3341 sizeof(u32), GFP_KERNEL); 3342 if (!txsch->pfvf_map) 3343 return -ENOMEM; 3344 for (schq = 0; schq < txsch->schq.max; schq++) 3345 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 3346 } 3347 3348 /* Setup a default value of 8192 as DWRR MTU */ 3349 if (rvu->hw->cap.nix_common_dwrr_mtu || 3350 rvu->hw->cap.nix_multiple_dwrr_mtu) { 3351 rvu_write64(rvu, blkaddr, 3352 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), 3353 convert_bytes_to_dwrr_mtu(8192)); 3354 rvu_write64(rvu, blkaddr, 3355 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), 3356 convert_bytes_to_dwrr_mtu(8192)); 3357 rvu_write64(rvu, blkaddr, 3358 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), 3359 convert_bytes_to_dwrr_mtu(8192)); 3360 } 3361 3362 return 0; 3363 } 3364 3365 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 3366 int blkaddr, u32 cfg) 3367 { 3368 int fmt_idx; 3369 3370 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 3371 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 3372 return fmt_idx; 3373 } 3374 if (fmt_idx >= nix_hw->mark_format.total) 3375 return -ERANGE; 3376 3377 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 3378 nix_hw->mark_format.cfg[fmt_idx] = cfg; 3379 nix_hw->mark_format.in_use++; 3380 return fmt_idx; 3381 } 3382 3383 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 3384 int blkaddr) 3385 { 3386 u64 cfgs[] = { 3387 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 3388 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 3389 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 3390 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 3391 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 3392 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 3393 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 3394 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 3395 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 3396 }; 3397 int i, rc; 3398 u64 total; 3399 3400 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 3401 nix_hw->mark_format.total = (u8)total; 3402 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 3403 GFP_KERNEL); 3404 if (!nix_hw->mark_format.cfg) 3405 return -ENOMEM; 3406 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 3407 rc = 
rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 3408 if (rc < 0) 3409 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 3410 i, rc); 3411 } 3412 3413 return 0; 3414 } 3415 3416 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3417 { 3418 /* CN10K supports LBK FIFO size 72 KB */ 3419 if (rvu->hw->lbk_bufsize == 0x12000) 3420 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 3421 else 3422 *max_mtu = NIC_HW_MAX_FRS; 3423 } 3424 3425 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 3426 { 3427 int fifo_size = rvu_cgx_get_fifolen(rvu); 3428 3429 /* RPM supports FIFO len 128 KB and RPM2 supports double the 3430 * FIFO len to accommodate 8 LMACS 3431 */ 3432 if (fifo_size == 0x20000 || fifo_size == 0x40000) 3433 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 3434 else 3435 *max_mtu = NIC_HW_MAX_FRS; 3436 } 3437 3438 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 3439 struct nix_hw_info *rsp) 3440 { 3441 u16 pcifunc = req->hdr.pcifunc; 3442 u64 dwrr_mtu; 3443 int blkaddr; 3444 3445 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3446 if (blkaddr < 0) 3447 return NIX_AF_ERR_AF_LF_INVALID; 3448 3449 if (is_afvf(pcifunc)) 3450 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 3451 else 3452 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 3453 3454 rsp->min_mtu = NIC_HW_MIN_FRS; 3455 3456 if (!rvu->hw->cap.nix_common_dwrr_mtu && 3457 !rvu->hw->cap.nix_multiple_dwrr_mtu) { 3458 /* Return '1' on OTx2 */ 3459 rsp->rpm_dwrr_mtu = 1; 3460 rsp->sdp_dwrr_mtu = 1; 3461 rsp->lbk_dwrr_mtu = 1; 3462 return 0; 3463 } 3464 3465 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ 3466 dwrr_mtu = rvu_read64(rvu, blkaddr, 3467 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); 3468 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3469 3470 dwrr_mtu = rvu_read64(rvu, blkaddr, 3471 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); 3472 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3473 3474 dwrr_mtu = rvu_read64(rvu, blkaddr, 3475 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); 3476 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); 3477 3478 return 0; 3479 } 3480 3481 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 3482 struct msg_rsp *rsp) 3483 { 3484 u16 pcifunc = req->hdr.pcifunc; 3485 int i, nixlf, blkaddr, err; 3486 u64 stats; 3487 3488 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3489 if (err) 3490 return err; 3491 3492 /* Get stats count supported by HW */ 3493 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 3494 3495 /* Reset tx stats */ 3496 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 3497 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 3498 3499 /* Reset rx stats */ 3500 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 3501 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 3502 3503 return 0; 3504 } 3505 3506 /* Returns the ALG index to be set into NPC_RX_ACTION */ 3507 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 3508 { 3509 int i; 3510 3511 /* Scan over exiting algo entries to find a match */ 3512 for (i = 0; i < nix_hw->flowkey.in_use; i++) 3513 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 3514 return i; 3515 3516 return -ERANGE; 3517 } 3518 3519 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 3520 { 3521 int idx, nr_field, key_off, field_marker, keyoff_marker; 3522 int max_key_off, max_bit_pos, group_member; 3523 struct nix_rx_flowkey_alg *field; 3524 struct nix_rx_flowkey_alg tmp; 3525 u32 key_type, 
valid_key; 3526 u32 l3_l4_src_dst; 3527 int l4_key_offset = 0; 3528 3529 if (!alg) 3530 return -EINVAL; 3531 3532 #define FIELDS_PER_ALG 5 3533 #define MAX_KEY_OFF 40 3534 /* Clear all fields */ 3535 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 3536 3537 /* Each of the 32 possible flow key algorithm definitions should 3538 * fall into above incremental config (except ALG0). Otherwise a 3539 * single NPC MCAM entry is not sufficient for supporting RSS. 3540 * 3541 * If a different definition or combination needed then NPC MCAM 3542 * has to be programmed to filter such pkts and it's action should 3543 * point to this definition to calculate flowtag or hash. 3544 * 3545 * The `for loop` goes over _all_ protocol field and the following 3546 * variables depicts the state machine forward progress logic. 3547 * 3548 * keyoff_marker - Enabled when hash byte length needs to be accounted 3549 * in field->key_offset update. 3550 * field_marker - Enabled when a new field needs to be selected. 3551 * group_member - Enabled when protocol is part of a group. 3552 */ 3553 3554 /* Last 4 bits (31:28) are reserved to specify SRC, DST 3555 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, 3556 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST 3557 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST 3558 */ 3559 l3_l4_src_dst = flow_cfg; 3560 /* Reset these 4 bits, so that these won't be part of key */ 3561 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; 3562 3563 keyoff_marker = 0; max_key_off = 0; group_member = 0; 3564 nr_field = 0; key_off = 0; field_marker = 1; 3565 field = &tmp; max_bit_pos = fls(flow_cfg); 3566 for (idx = 0; 3567 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 3568 key_off < MAX_KEY_OFF; idx++) { 3569 key_type = BIT(idx); 3570 valid_key = flow_cfg & key_type; 3571 /* Found a field marker, reset the field values */ 3572 if (field_marker) 3573 memset(&tmp, 0, sizeof(tmp)); 3574 3575 field_marker = true; 3576 keyoff_marker = true; 3577 switch (key_type) { 3578 case NIX_FLOW_KEY_TYPE_PORT: 3579 field->sel_chan = true; 3580 /* This should be set to 1, when SEL_CHAN is set */ 3581 field->bytesm1 = 1; 3582 break; 3583 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 3584 field->lid = NPC_LID_LC; 3585 field->hdr_offset = 9; /* offset */ 3586 field->bytesm1 = 0; /* 1 byte */ 3587 field->ltype_match = NPC_LT_LC_IP; 3588 field->ltype_mask = 0xF; 3589 break; 3590 case NIX_FLOW_KEY_TYPE_IPV4: 3591 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 3592 field->lid = NPC_LID_LC; 3593 field->ltype_match = NPC_LT_LC_IP; 3594 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 3595 field->lid = NPC_LID_LG; 3596 field->ltype_match = NPC_LT_LG_TU_IP; 3597 } 3598 field->hdr_offset = 12; /* SIP offset */ 3599 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 3600 3601 /* Only SIP */ 3602 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3603 field->bytesm1 = 3; /* SIP, 4 bytes */ 3604 3605 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3606 /* Both SIP + DIP */ 3607 if (field->bytesm1 == 3) { 3608 field->bytesm1 = 7; /* SIP + DIP, 8B */ 3609 } else { 3610 /* Only DIP */ 3611 field->hdr_offset = 16; /* DIP off */ 3612 field->bytesm1 = 3; /* DIP, 4 bytes */ 3613 } 3614 } 3615 3616 field->ltype_mask = 0xF; /* Match only IPv4 */ 3617 keyoff_marker = false; 3618 break; 3619 case NIX_FLOW_KEY_TYPE_IPV6: 3620 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 3621 field->lid = NPC_LID_LC; 3622 field->ltype_match = NPC_LT_LC_IP6; 3623 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 3624 field->lid = NPC_LID_LG; 3625 field->ltype_match = NPC_LT_LG_TU_IP6; 3626 } 
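			/* IPv6 fixed header: SIP starts at byte offset 8 and DIP at
			 * byte offset 24, 16 bytes each; hence the offsets and sizes
			 * used below.
			 */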
3627 field->hdr_offset = 8; /* SIP offset */ 3628 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 3629 3630 /* Only SIP */ 3631 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) 3632 field->bytesm1 = 15; /* SIP, 16 bytes */ 3633 3634 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { 3635 /* Both SIP + DIP */ 3636 if (field->bytesm1 == 15) { 3637 /* SIP + DIP, 32 bytes */ 3638 field->bytesm1 = 31; 3639 } else { 3640 /* Only DIP */ 3641 field->hdr_offset = 24; /* DIP off */ 3642 field->bytesm1 = 15; /* DIP,16 bytes */ 3643 } 3644 } 3645 field->ltype_mask = 0xF; /* Match only IPv6 */ 3646 break; 3647 case NIX_FLOW_KEY_TYPE_TCP: 3648 case NIX_FLOW_KEY_TYPE_UDP: 3649 case NIX_FLOW_KEY_TYPE_SCTP: 3650 case NIX_FLOW_KEY_TYPE_INNR_TCP: 3651 case NIX_FLOW_KEY_TYPE_INNR_UDP: 3652 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 3653 field->lid = NPC_LID_LD; 3654 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 3655 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 3656 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 3657 field->lid = NPC_LID_LH; 3658 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 3659 3660 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) 3661 field->bytesm1 = 1; /* SRC, 2 bytes */ 3662 3663 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { 3664 /* Both SRC + DST */ 3665 if (field->bytesm1 == 1) { 3666 /* SRC + DST, 4 bytes */ 3667 field->bytesm1 = 3; 3668 } else { 3669 /* Only DIP */ 3670 field->hdr_offset = 2; /* DST off */ 3671 field->bytesm1 = 1; /* DST, 2 bytes */ 3672 } 3673 } 3674 3675 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 3676 * so no need to change the ltype_match, just change 3677 * the lid for inner protocols 3678 */ 3679 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 3680 (int)NPC_LT_LH_TU_TCP); 3681 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 3682 (int)NPC_LT_LH_TU_UDP); 3683 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 3684 (int)NPC_LT_LH_TU_SCTP); 3685 3686 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 3687 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 3688 valid_key) { 3689 field->ltype_match |= NPC_LT_LD_TCP; 3690 group_member = true; 3691 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 3692 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 3693 valid_key) { 3694 field->ltype_match |= NPC_LT_LD_UDP; 3695 group_member = true; 3696 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 3697 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 3698 valid_key) { 3699 field->ltype_match |= NPC_LT_LD_SCTP; 3700 group_member = true; 3701 } 3702 field->ltype_mask = ~field->ltype_match; 3703 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 3704 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 3705 /* Handle the case where any of the group item 3706 * is enabled in the group but not the final one 3707 */ 3708 if (group_member) { 3709 valid_key = true; 3710 group_member = false; 3711 } 3712 } else { 3713 field_marker = false; 3714 keyoff_marker = false; 3715 } 3716 3717 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 3718 * remember the TCP key offset of 40 byte hash key. 
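 * (the l4_key_offset saved below is reused for ESP/AH so their SPI
 * bytes land at the same position in the key)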
3719 */ 3720 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 3721 l4_key_offset = key_off; 3722 break; 3723 case NIX_FLOW_KEY_TYPE_NVGRE: 3724 field->lid = NPC_LID_LD; 3725 field->hdr_offset = 4; /* VSID offset */ 3726 field->bytesm1 = 2; 3727 field->ltype_match = NPC_LT_LD_NVGRE; 3728 field->ltype_mask = 0xF; 3729 break; 3730 case NIX_FLOW_KEY_TYPE_VXLAN: 3731 case NIX_FLOW_KEY_TYPE_GENEVE: 3732 field->lid = NPC_LID_LE; 3733 field->bytesm1 = 2; 3734 field->hdr_offset = 4; 3735 field->ltype_mask = 0xF; 3736 field_marker = false; 3737 keyoff_marker = false; 3738 3739 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 3740 field->ltype_match |= NPC_LT_LE_VXLAN; 3741 group_member = true; 3742 } 3743 3744 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 3745 field->ltype_match |= NPC_LT_LE_GENEVE; 3746 group_member = true; 3747 } 3748 3749 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 3750 if (group_member) { 3751 field->ltype_mask = ~field->ltype_match; 3752 field_marker = true; 3753 keyoff_marker = true; 3754 valid_key = true; 3755 group_member = false; 3756 } 3757 } 3758 break; 3759 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 3760 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 3761 field->lid = NPC_LID_LA; 3762 field->ltype_match = NPC_LT_LA_ETHER; 3763 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 3764 field->lid = NPC_LID_LF; 3765 field->ltype_match = NPC_LT_LF_TU_ETHER; 3766 } 3767 field->hdr_offset = 0; 3768 field->bytesm1 = 5; /* DMAC 6 Byte */ 3769 field->ltype_mask = 0xF; 3770 break; 3771 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 3772 field->lid = NPC_LID_LC; 3773 field->hdr_offset = 40; /* IPV6 hdr */ 3774 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 3775 field->ltype_match = NPC_LT_LC_IP6_EXT; 3776 field->ltype_mask = 0xF; 3777 break; 3778 case NIX_FLOW_KEY_TYPE_GTPU: 3779 field->lid = NPC_LID_LE; 3780 field->hdr_offset = 4; 3781 field->bytesm1 = 3; /* 4 bytes TID*/ 3782 field->ltype_match = NPC_LT_LE_GTPU; 3783 field->ltype_mask = 0xF; 3784 break; 3785 case NIX_FLOW_KEY_TYPE_VLAN: 3786 field->lid = NPC_LID_LB; 3787 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 3788 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3789 field->ltype_match = NPC_LT_LB_CTAG; 3790 field->ltype_mask = 0xF; 3791 field->fn_mask = 1; /* Mask out the first nibble */ 3792 break; 3793 case NIX_FLOW_KEY_TYPE_AH: 3794 case NIX_FLOW_KEY_TYPE_ESP: 3795 field->hdr_offset = 0; 3796 field->bytesm1 = 7; /* SPI + sequence number */ 3797 field->ltype_mask = 0xF; 3798 field->lid = NPC_LID_LE; 3799 field->ltype_match = NPC_LT_LE_ESP; 3800 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3801 field->lid = NPC_LID_LD; 3802 field->ltype_match = NPC_LT_LD_AH; 3803 field->hdr_offset = 4; 3804 keyoff_marker = false; 3805 } 3806 break; 3807 } 3808 field->ena = 1; 3809 3810 /* Found a valid flow key type */ 3811 if (valid_key) { 3812 /* Use the key offset of TCP/UDP/SCTP fields 3813 * for ESP/AH fields. 
3814 */ 3815 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 3816 key_type == NIX_FLOW_KEY_TYPE_AH) 3817 key_off = l4_key_offset; 3818 field->key_offset = key_off; 3819 memcpy(&alg[nr_field], field, sizeof(*field)); 3820 max_key_off = max(max_key_off, field->bytesm1 + 1); 3821 3822 /* Found a field marker, get the next field */ 3823 if (field_marker) 3824 nr_field++; 3825 } 3826 3827 /* Found a keyoff marker, update the new key_off */ 3828 if (keyoff_marker) { 3829 key_off += max_key_off; 3830 max_key_off = 0; 3831 } 3832 } 3833 /* Processed all the flow key types */ 3834 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 3835 return 0; 3836 else 3837 return NIX_AF_ERR_RSS_NOSPC_FIELD; 3838 } 3839 3840 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 3841 { 3842 u64 field[FIELDS_PER_ALG]; 3843 struct nix_hw *hw; 3844 int fid, rc; 3845 3846 hw = get_nix_hw(rvu->hw, blkaddr); 3847 if (!hw) 3848 return NIX_AF_ERR_INVALID_NIXBLK; 3849 3850 /* No room to add a new flow hash algorithm */ 3851 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 3852 return NIX_AF_ERR_RSS_NOSPC_ALGO; 3853 3854 /* Generate algo fields for the given flow_cfg */ 3855 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 3856 if (rc) 3857 return rc; 3858 3859 /* Update ALGX_FIELDX register with generated fields */ 3860 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3861 rvu_write64(rvu, blkaddr, 3862 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 3863 fid), field[fid]); 3864 3865 /* Store the flow_cfg for further lookup */ 3866 rc = hw->flowkey.in_use; 3867 hw->flowkey.flowkey[rc] = flow_cfg; 3868 hw->flowkey.in_use++; 3869 3870 return rc; 3871 } 3872 3873 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 3874 struct nix_rss_flowkey_cfg *req, 3875 struct nix_rss_flowkey_cfg_rsp *rsp) 3876 { 3877 u16 pcifunc = req->hdr.pcifunc; 3878 int alg_idx, nixlf, blkaddr; 3879 struct nix_hw *nix_hw; 3880 int err; 3881 3882 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3883 if (err) 3884 return err; 3885 3886 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3887 if (!nix_hw) 3888 return NIX_AF_ERR_INVALID_NIXBLK; 3889 3890 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 3891 /* Failed to get algo index from the existing list, reserve a new one */ 3892 if (alg_idx < 0) { 3893 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 3894 req->flowkey_cfg); 3895 if (alg_idx < 0) 3896 return alg_idx; 3897 } 3898 rsp->alg_idx = alg_idx; 3899 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 3900 alg_idx, req->mcam_index); 3901 return 0; 3902 } 3903 3904 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 3905 { 3906 u32 flowkey_cfg, minkey_cfg; 3907 int alg, fid, rc; 3908 3909 /* Disable all flow key algx fieldx */ 3910 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 3911 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3912 rvu_write64(rvu, blkaddr, 3913 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 3914 0); 3915 } 3916 3917 /* IPv4/IPv6 SIP/DIPs */ 3918 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 3919 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3920 if (rc < 0) 3921 return rc; 3922 3923 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3924 minkey_cfg = flowkey_cfg; 3925 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 3926 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3927 if (rc < 0) 3928 return rc; 3929 3930 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3931 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 3932 rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3933 if (rc < 0) 3934 return rc; 3935 3936 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3937 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3938 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3939 if (rc < 0) 3940 return rc; 3941 3942 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3943 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3944 NIX_FLOW_KEY_TYPE_UDP; 3945 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3946 if (rc < 0) 3947 return rc; 3948 3949 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3950 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3951 NIX_FLOW_KEY_TYPE_SCTP; 3952 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3953 if (rc < 0) 3954 return rc; 3955 3956 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3957 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3958 NIX_FLOW_KEY_TYPE_SCTP; 3959 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3960 if (rc < 0) 3961 return rc; 3962 3963 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3964 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3965 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3966 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3967 if (rc < 0) 3968 return rc; 3969 3970 return 0; 3971 } 3972 3973 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3974 struct nix_set_mac_addr *req, 3975 struct msg_rsp *rsp) 3976 { 3977 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3978 u16 pcifunc = req->hdr.pcifunc; 3979 int blkaddr, nixlf, err; 3980 struct rvu_pfvf *pfvf; 3981 3982 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3983 if (err) 3984 return err; 3985 3986 pfvf = rvu_get_pfvf(rvu, pcifunc); 3987 3988 /* untrusted VF can't overwrite admin(PF) changes */ 3989 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3990 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3991 dev_warn(rvu->dev, 3992 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3993 return -EPERM; 3994 } 3995 3996 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3997 3998 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3999 pfvf->rx_chan_base, req->mac_addr); 4000 4001 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 4002 ether_addr_copy(pfvf->default_mac, req->mac_addr); 4003 4004 rvu_switch_update_rules(rvu, pcifunc); 4005 4006 return 0; 4007 } 4008 4009 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 4010 struct msg_req *req, 4011 struct nix_get_mac_addr_rsp *rsp) 4012 { 4013 u16 pcifunc = req->hdr.pcifunc; 4014 struct rvu_pfvf *pfvf; 4015 4016 if (!is_nixlf_attached(rvu, pcifunc)) 4017 return NIX_AF_ERR_AF_LF_INVALID; 4018 4019 pfvf = rvu_get_pfvf(rvu, pcifunc); 4020 4021 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 4022 4023 return 0; 4024 } 4025 4026 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 4027 struct msg_rsp *rsp) 4028 { 4029 bool allmulti, promisc, nix_rx_multicast; 4030 u16 pcifunc = req->hdr.pcifunc; 4031 struct rvu_pfvf *pfvf; 4032 int nixlf, err; 4033 4034 pfvf = rvu_get_pfvf(rvu, pcifunc); 4035 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 4036 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 4037 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 4038 4039 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 4040 4041 if (is_vf(pcifunc) && !nix_rx_multicast && 4042 (promisc || allmulti)) { 4043 dev_warn_ratelimited(rvu->dev, 4044 "VF promisc/multicast not supported\n"); 4045 return 0; 4046 } 4047 4048 /* untrusted VF can't configure promisc/allmulti */ 4049 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 4050 (promisc || allmulti)) 4051 return 0; 4052 4053 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4054 if (err) 4055 return err; 4056 4057 if (nix_rx_multicast) { 4058 /* add/del this PF_FUNC to/from mcast pkt replication list */ 4059 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 4060 allmulti); 4061 if (err) { 4062 dev_err(rvu->dev, 4063 "Failed to update pcifunc 0x%x to multicast list\n", 4064 pcifunc); 4065 return err; 4066 } 4067 4068 /* add/del this PF_FUNC to/from promisc pkt replication list */ 4069 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 4070 promisc); 4071 if (err) { 4072 dev_err(rvu->dev, 4073 "Failed to update pcifunc 0x%x to promisc list\n", 4074 pcifunc); 4075 return err; 4076 } 4077 } 4078 4079 /* install/uninstall allmulti entry */ 4080 if (allmulti) { 4081 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 4082 pfvf->rx_chan_base); 4083 } else { 4084 if (!nix_rx_multicast) 4085 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 4086 } 4087 4088 /* install/uninstall promisc entry */ 4089 if (promisc) 4090 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 4091 pfvf->rx_chan_base, 4092 pfvf->rx_chan_cnt); 4093 else 4094 if (!nix_rx_multicast) 4095 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 4096 4097 return 0; 4098 } 4099 4100 static void nix_find_link_frs(struct rvu *rvu, 4101 struct nix_frs_cfg *req, u16 pcifunc) 4102 { 4103 int pf = rvu_get_pf(pcifunc); 4104 struct rvu_pfvf *pfvf; 4105 int maxlen, minlen; 4106 int numvfs, hwvf; 4107 int vf; 4108 4109 /* Update with requester's min/max lengths */ 4110 pfvf = rvu_get_pfvf(rvu, pcifunc); 4111 pfvf->maxlen = req->maxlen; 4112 if (req->update_minlen) 4113 pfvf->minlen = req->minlen; 4114 4115 maxlen = req->maxlen; 4116 minlen = req->update_minlen ? 
req->minlen : 0; 4117 4118 /* Get this PF's numVFs and starting hwvf */ 4119 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 4120 4121 /* For each VF, compare requested max/minlen */ 4122 for (vf = 0; vf < numvfs; vf++) { 4123 pfvf = &rvu->hwvf[hwvf + vf]; 4124 if (pfvf->maxlen > maxlen) 4125 maxlen = pfvf->maxlen; 4126 if (req->update_minlen && 4127 pfvf->minlen && pfvf->minlen < minlen) 4128 minlen = pfvf->minlen; 4129 } 4130 4131 /* Compare requested max/minlen with PF's max/minlen */ 4132 pfvf = &rvu->pf[pf]; 4133 if (pfvf->maxlen > maxlen) 4134 maxlen = pfvf->maxlen; 4135 if (req->update_minlen && 4136 pfvf->minlen && pfvf->minlen < minlen) 4137 minlen = pfvf->minlen; 4138 4139 /* Update the request with max/min PF's and it's VF's max/min */ 4140 req->maxlen = maxlen; 4141 if (req->update_minlen) 4142 req->minlen = minlen; 4143 } 4144 4145 static int 4146 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, 4147 u16 pcifunc, u64 tx_credits) 4148 { 4149 struct rvu_hwinfo *hw = rvu->hw; 4150 int pf = rvu_get_pf(pcifunc); 4151 u8 cgx_id = 0, lmac_id = 0; 4152 unsigned long poll_tmo; 4153 bool restore_tx_en = 0; 4154 struct nix_hw *nix_hw; 4155 u64 cfg, sw_xoff = 0; 4156 u32 schq = 0; 4157 u32 credits; 4158 int rc; 4159 4160 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4161 if (!nix_hw) 4162 return NIX_AF_ERR_INVALID_NIXBLK; 4163 4164 if (tx_credits == nix_hw->tx_credits[link]) 4165 return 0; 4166 4167 /* Enable cgx tx if disabled for credits to be back */ 4168 if (is_pf_cgxmapped(rvu, pf)) { 4169 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4170 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), 4171 lmac_id, true); 4172 } 4173 4174 mutex_lock(&rvu->rsrc_lock); 4175 /* Disable new traffic to link */ 4176 if (hw->cap.nix_shaping) { 4177 schq = nix_get_tx_link(rvu, pcifunc); 4178 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); 4179 rvu_write64(rvu, blkaddr, 4180 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); 4181 } 4182 4183 rc = NIX_AF_ERR_LINK_CREDITS; 4184 poll_tmo = jiffies + usecs_to_jiffies(200000); 4185 /* Wait for credits to return */ 4186 do { 4187 if (time_after(jiffies, poll_tmo)) 4188 goto exit; 4189 usleep_range(100, 200); 4190 4191 cfg = rvu_read64(rvu, blkaddr, 4192 NIX_AF_TX_LINKX_NORM_CREDIT(link)); 4193 credits = (cfg >> 12) & 0xFFFFFULL; 4194 } while (credits != nix_hw->tx_credits[link]); 4195 4196 cfg &= ~(0xFFFFFULL << 12); 4197 cfg |= (tx_credits << 12); 4198 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4199 rc = 0; 4200 4201 nix_hw->tx_credits[link] = tx_credits; 4202 4203 exit: 4204 /* Enable traffic back */ 4205 if (hw->cap.nix_shaping && !sw_xoff) 4206 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); 4207 4208 /* Restore state of cgx tx */ 4209 if (restore_tx_en) 4210 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 4211 4212 mutex_unlock(&rvu->rsrc_lock); 4213 return rc; 4214 } 4215 4216 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 4217 struct msg_rsp *rsp) 4218 { 4219 struct rvu_hwinfo *hw = rvu->hw; 4220 u16 pcifunc = req->hdr.pcifunc; 4221 int pf = rvu_get_pf(pcifunc); 4222 int blkaddr, schq, link = -1; 4223 struct nix_txsch *txsch; 4224 u64 cfg, lmac_fifo_len; 4225 struct nix_hw *nix_hw; 4226 struct rvu_pfvf *pfvf; 4227 u8 cgx = 0, lmac = 0; 4228 u16 max_mtu; 4229 4230 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4231 if (blkaddr < 0) 4232 return NIX_AF_ERR_AF_LF_INVALID; 4233 4234 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4235 if 
(!nix_hw) 4236 return NIX_AF_ERR_INVALID_NIXBLK; 4237 4238 if (is_afvf(pcifunc)) 4239 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 4240 else 4241 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 4242 4243 if (!req->sdp_link && req->maxlen > max_mtu) 4244 return NIX_AF_ERR_FRS_INVALID; 4245 4246 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 4247 return NIX_AF_ERR_FRS_INVALID; 4248 4249 /* Check if requester wants to update SMQ's */ 4250 if (!req->update_smq) 4251 goto rx_frscfg; 4252 4253 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 4254 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 4255 mutex_lock(&rvu->rsrc_lock); 4256 for (schq = 0; schq < txsch->schq.max; schq++) { 4257 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 4258 continue; 4259 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 4260 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 4261 if (req->update_minlen) 4262 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 4263 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 4264 } 4265 mutex_unlock(&rvu->rsrc_lock); 4266 4267 rx_frscfg: 4268 /* Check if config is for SDP link */ 4269 if (req->sdp_link) { 4270 if (!hw->sdp_links) 4271 return NIX_AF_ERR_RX_LINK_INVALID; 4272 link = hw->cgx_links + hw->lbk_links; 4273 goto linkcfg; 4274 } 4275 4276 /* Check if the request is from CGX mapped RVU PF */ 4277 if (is_pf_cgxmapped(rvu, pf)) { 4278 /* Get CGX and LMAC to which this PF is mapped and find link */ 4279 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 4280 link = (cgx * hw->lmac_per_cgx) + lmac; 4281 } else if (pf == 0) { 4282 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 4283 pfvf = rvu_get_pfvf(rvu, pcifunc); 4284 link = hw->cgx_links + pfvf->lbkid; 4285 } 4286 4287 if (link < 0) 4288 return NIX_AF_ERR_RX_LINK_INVALID; 4289 4290 4291 linkcfg: 4292 nix_find_link_frs(rvu, req, pcifunc); 4293 4294 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4295 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4296 if (req->update_minlen) 4297 cfg = (cfg & ~0xFFFFULL) | req->minlen; 4298 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 4299 4300 if (req->sdp_link || pf == 0) 4301 return 0; 4302 4303 /* Update transmit credits for CGX links */ 4304 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); 4305 if (!lmac_fifo_len) { 4306 dev_err(rvu->dev, 4307 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4308 __func__, cgx, lmac); 4309 return 0; 4310 } 4311 return nix_config_link_credits(rvu, blkaddr, link, pcifunc, 4312 (lmac_fifo_len - req->maxlen) / 16); 4313 } 4314 4315 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 4316 struct msg_rsp *rsp) 4317 { 4318 int nixlf, blkaddr, err; 4319 u64 cfg; 4320 4321 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 4322 if (err) 4323 return err; 4324 4325 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 4326 /* Set the interface configuration */ 4327 if (req->len_verify & BIT(0)) 4328 cfg |= BIT_ULL(41); 4329 else 4330 cfg &= ~BIT_ULL(41); 4331 4332 if (req->len_verify & BIT(1)) 4333 cfg |= BIT_ULL(40); 4334 else 4335 cfg &= ~BIT_ULL(40); 4336 4337 if (req->len_verify & NIX_RX_DROP_RE) 4338 cfg |= BIT_ULL(32); 4339 else 4340 cfg &= ~BIT_ULL(32); 4341 4342 if (req->csum_verify & BIT(0)) 4343 cfg |= BIT_ULL(37); 4344 else 4345 cfg &= ~BIT_ULL(37); 4346 4347 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 4348 4349 return 0; 4350 } 4351 4352 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, 
u16 lbk_max_frs) 4353 { 4354 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 4355 } 4356 4357 static void nix_link_config(struct rvu *rvu, int blkaddr, 4358 struct nix_hw *nix_hw) 4359 { 4360 struct rvu_hwinfo *hw = rvu->hw; 4361 int cgx, lmac_cnt, slink, link; 4362 u16 lbk_max_frs, lmac_max_frs; 4363 unsigned long lmac_bmap; 4364 u64 tx_credits, cfg; 4365 u64 lmac_fifo_len; 4366 int iter; 4367 4368 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 4369 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 4370 4371 /* Set default min/max packet lengths allowed on NIX Rx links. 4372 * 4373 * With the HW reset minlen value of 60 bytes, HW would treat ARP pkts 4374 * as undersize and report them to SW as error pkts, hence minlen 4375 * is set to 40 bytes. 4376 */ 4377 for (link = 0; link < hw->cgx_links; link++) { 4378 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4379 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 4380 } 4381 4382 for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) { 4383 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4384 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 4385 } 4386 if (hw->sdp_links) { 4387 link = hw->cgx_links + hw->lbk_links; 4388 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 4389 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 4390 } 4391 4392 /* Set credits for Tx links assuming max packet length allowed. 4393 * This will be reconfigured based on MTU set for PF/VF. 4394 */ 4395 for (cgx = 0; cgx < hw->cgx; cgx++) { 4396 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 4397 /* Skip when cgx is not available or lmac cnt is zero */ 4398 if (lmac_cnt <= 0) 4399 continue; 4400 slink = cgx * hw->lmac_per_cgx; 4401 4402 /* Get LMAC ids from bitmap */ 4403 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 4404 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { 4405 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); 4406 if (!lmac_fifo_len) { 4407 dev_err(rvu->dev, 4408 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", 4409 __func__, cgx, iter); 4410 continue; 4411 } 4412 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; 4413 /* Enable credits and set credit pkt count to max allowed */ 4414 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4415 4416 link = iter + slink; 4417 nix_hw->tx_credits[link] = tx_credits; 4418 rvu_write64(rvu, blkaddr, 4419 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 4420 } 4421 } 4422 4423 /* Set Tx credits for LBK link */ 4424 slink = hw->cgx_links; 4425 for (link = slink; link < (slink + hw->lbk_links); link++) { 4426 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 4427 nix_hw->tx_credits[link] = tx_credits; 4428 /* Enable credits and set credit pkt count to max allowed */ 4429 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 4430 rvu_write64(rvu, blkaddr, 4431 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 4432 } 4433 } 4434 4435 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 4436 { 4437 int idx, err; 4438 u64 status; 4439 4440 /* Start X2P bus calibration */ 4441 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4442 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 4443 /* Wait for calibration to complete */ 4444 err = rvu_poll_reg(rvu, blkaddr, 4445 NIX_AF_STATUS, BIT_ULL(10), false); 4446 if (err) { 4447 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 4448 return err; 4449 } 4450 4451 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 4452 /* Check if CGX devices are ready */ 4453 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 4454 /* Skip when cgx port is not available */
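/* A CGX that has already acknowledged calibration sets STATUS bit (16 + idx) and is skipped as well */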
4455 if (!rvu_cgx_pdata(idx, rvu) || 4456 (status & (BIT_ULL(16 + idx)))) 4457 continue; 4458 dev_err(rvu->dev, 4459 "CGX%d didn't respond to NIX X2P calibration\n", idx); 4460 err = -EBUSY; 4461 } 4462 4463 /* Check if LBK is ready */ 4464 if (!(status & BIT_ULL(19))) { 4465 dev_err(rvu->dev, 4466 "LBK didn't respond to NIX X2P calibration\n"); 4467 err = -EBUSY; 4468 } 4469 4470 /* Clear 'calibrate_x2p' bit */ 4471 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4472 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 4473 if (err || (status & 0x3FFULL)) 4474 dev_err(rvu->dev, 4475 "NIX X2P calibration failed, status 0x%llx\n", status); 4476 if (err) 4477 return err; 4478 return 0; 4479 } 4480 4481 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 4482 { 4483 u64 cfg; 4484 int err; 4485 4486 /* Set admin queue endianness */ 4487 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 4488 #ifdef __BIG_ENDIAN 4489 cfg |= BIT_ULL(8); 4490 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4491 #else 4492 cfg &= ~BIT_ULL(8); 4493 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 4494 #endif 4495 4496 /* Do not bypass NDC cache */ 4497 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 4498 cfg &= ~0x3FFEULL; 4499 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 4500 /* Disable caching of SQB aka SQEs */ 4501 cfg |= 0x04ULL; 4502 #endif 4503 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 4504 4505 /* Result structure can be followed by RQ/SQ/CQ context at 4506 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on 4507 * operation type. Alloc sufficient result memory for all operations. 4508 */ 4509 err = rvu_aq_alloc(rvu, &block->aq, 4510 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 4511 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 4512 if (err) 4513 return err; 4514 4515 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 4516 rvu_write64(rvu, block->addr, 4517 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 4518 return 0; 4519 } 4520 4521 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) 4522 { 4523 struct rvu_hwinfo *hw = rvu->hw; 4524 u64 hw_const; 4525 4526 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 4527 4528 /* On OcteonTx2 the DWRR quantum is directly configured into each of 4529 * the transmit scheduler queues, and PF/VF drivers were free to 4530 * configure any value up to 2^24. 4531 * On CN10K the HW is changed: the quantum configured at the scheduler 4532 * queues is in terms of weight, and SW needs to set up a base DWRR MTU 4533 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do 4534 * 'DWRR MTU * weight' to get the quantum. 4535 * 4536 * Check if HW uses a common MTU for all DWRR quantum configs. 4537 * On OcteonTx2 this register field is '0'. 4538 */ 4539 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) 4540 hw->cap.nix_common_dwrr_mtu = true; 4541 4542 if (hw_const & BIT_ULL(61)) 4543 hw->cap.nix_multiple_dwrr_mtu = true; 4544 } 4545 4546 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 4547 { 4548 const struct npc_lt_def_cfg *ltdefs; 4549 struct rvu_hwinfo *hw = rvu->hw; 4550 int blkaddr = nix_hw->blkaddr; 4551 struct rvu_block *block; 4552 int err; 4553 u64 cfg; 4554 4555 block = &hw->block[blkaddr]; 4556 4557 if (is_rvu_96xx_B0(rvu)) { 4558 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 4559 * internal state when conditional clocks are turned off. 4560 * Hence enable them.
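 * (the 0x40ULL OR'ed into NIX_AF_CFG below is what keeps these conditional clocks enabled)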
4561 */ 4562 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 4563 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 4564 4565 /* Set chan/link to backpressure TL3 instead of TL2 */ 4566 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 4567 4568 /* Disable SQ manager's sticky mode operation (set TM6 = 0). 4569 * This sticky mode is known to cause SQ stalls when multiple 4570 * SQs are mapped to the same SMQ and are transmitting pkts at the same time. 4571 */ 4572 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 4573 cfg &= ~BIT_ULL(15); 4574 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 4575 } 4576 4577 ltdefs = rvu->kpu.lt_def; 4578 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 4579 err = nix_calibrate_x2p(rvu, blkaddr); 4580 if (err) 4581 return err; 4582 4583 /* Setup capabilities of the NIX block */ 4584 rvu_nix_setup_capabilities(rvu, blkaddr); 4585 4586 /* Initialize admin queue */ 4587 err = nix_aq_init(rvu, block); 4588 if (err) 4589 return err; 4590 4591 /* Restore CINT timer delay to HW reset values */ 4592 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 4593 4594 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); 4595 4596 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs */ 4597 cfg |= 1ULL; 4598 if (!is_rvu_otx2(rvu)) 4599 cfg |= NIX_PTP_1STEP_EN; 4600 4601 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); 4602 4603 if (!is_rvu_otx2(rvu)) 4604 rvu_nix_block_cn10k_init(rvu, nix_hw); 4605 4606 if (is_block_implemented(hw, blkaddr)) { 4607 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 4608 if (err) 4609 return err; 4610 4611 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 4612 if (err) 4613 return err; 4614 4615 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 4616 if (err) 4617 return err; 4618 4619 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 4620 if (err) 4621 return err; 4622 4623 err = nix_setup_txvlan(rvu, nix_hw); 4624 if (err) 4625 return err; 4626 4627 /* Configure segmentation offload formats */ 4628 nix_setup_lso(rvu, nix_hw, blkaddr); 4629 4630 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 4631 * This helps HW protocol checker to identify headers 4632 * and validate length and checksums.
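 * Each NIX_AF_RX_DEF_* value written below is packed as (lid << 8) | (ltype_match << 4) | ltype_mask.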
4633 */ 4634 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 4635 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 4636 ltdefs->rx_ol2.ltype_mask); 4637 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 4638 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 4639 ltdefs->rx_oip4.ltype_mask); 4640 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 4641 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 4642 ltdefs->rx_iip4.ltype_mask); 4643 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 4644 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 4645 ltdefs->rx_oip6.ltype_mask); 4646 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 4647 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 4648 ltdefs->rx_iip6.ltype_mask); 4649 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 4650 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 4651 ltdefs->rx_otcp.ltype_mask); 4652 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 4653 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 4654 ltdefs->rx_itcp.ltype_mask); 4655 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 4656 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 4657 ltdefs->rx_oudp.ltype_mask); 4658 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 4659 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 4660 ltdefs->rx_iudp.ltype_mask); 4661 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 4662 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 4663 ltdefs->rx_osctp.ltype_mask); 4664 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 4665 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 4666 ltdefs->rx_isctp.ltype_mask); 4667 4668 if (!is_rvu_otx2(rvu)) { 4669 /* Enable APAD calculation for other protocols 4670 * matching APAD0 and APAD1 lt def registers. 4671 */ 4672 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 4673 (ltdefs->rx_apad0.valid << 11) | 4674 (ltdefs->rx_apad0.lid << 8) | 4675 (ltdefs->rx_apad0.ltype_match << 4) | 4676 ltdefs->rx_apad0.ltype_mask); 4677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 4678 (ltdefs->rx_apad1.valid << 11) | 4679 (ltdefs->rx_apad1.lid << 8) | 4680 (ltdefs->rx_apad1.ltype_match << 4) | 4681 ltdefs->rx_apad1.ltype_mask); 4682 4683 /* Receive ethertype definition register defines layer 4684 * information in NPC_RESULT_S to identify the Ethertype 4685 * location in L2 header. Used for Ethertype overwriting 4686 * in inline IPsec flow.
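 * Each NIX_AF_RX_DEF_ET() value written below additionally carries the Ethertype byte offset (<< 12) and a valid bit (<< 11).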
4687 */ 4688 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 4689 (ltdefs->rx_et[0].offset << 12) | 4690 (ltdefs->rx_et[0].valid << 11) | 4691 (ltdefs->rx_et[0].lid << 8) | 4692 (ltdefs->rx_et[0].ltype_match << 4) | 4693 ltdefs->rx_et[0].ltype_mask); 4694 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 4695 (ltdefs->rx_et[1].offset << 12) | 4696 (ltdefs->rx_et[1].valid << 11) | 4697 (ltdefs->rx_et[1].lid << 8) | 4698 (ltdefs->rx_et[1].ltype_match << 4) | 4699 ltdefs->rx_et[1].ltype_mask); 4700 } 4701 4702 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 4703 if (err) 4704 return err; 4705 4706 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, 4707 sizeof(u64), GFP_KERNEL); 4708 if (!nix_hw->tx_credits) 4709 return -ENOMEM; 4710 4711 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 4712 nix_link_config(rvu, blkaddr, nix_hw); 4713 4714 /* Enable Channel backpressure */ 4715 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 4716 } 4717 return 0; 4718 } 4719 4720 int rvu_nix_init(struct rvu *rvu) 4721 { 4722 struct rvu_hwinfo *hw = rvu->hw; 4723 struct nix_hw *nix_hw; 4724 int blkaddr = 0, err; 4725 int i = 0; 4726 4727 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 4728 GFP_KERNEL); 4729 if (!hw->nix) 4730 return -ENOMEM; 4731 4732 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4733 while (blkaddr) { 4734 nix_hw = &hw->nix[i]; 4735 nix_hw->rvu = rvu; 4736 nix_hw->blkaddr = blkaddr; 4737 err = rvu_nix_block_init(rvu, nix_hw); 4738 if (err) 4739 return err; 4740 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4741 i++; 4742 } 4743 4744 return 0; 4745 } 4746 4747 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 4748 struct rvu_block *block) 4749 { 4750 struct nix_txsch *txsch; 4751 struct nix_mcast *mcast; 4752 struct nix_txvlan *vlan; 4753 struct nix_hw *nix_hw; 4754 int lvl; 4755 4756 rvu_aq_free(rvu, block->aq); 4757 4758 if (is_block_implemented(rvu->hw, blkaddr)) { 4759 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4760 if (!nix_hw) 4761 return; 4762 4763 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 4764 txsch = &nix_hw->txsch[lvl]; 4765 kfree(txsch->schq.bmap); 4766 } 4767 4768 kfree(nix_hw->tx_credits); 4769 4770 nix_ipolicer_freemem(rvu, nix_hw); 4771 4772 vlan = &nix_hw->txvlan; 4773 kfree(vlan->rsrc.bmap); 4774 mutex_destroy(&vlan->rsrc_lock); 4775 4776 mcast = &nix_hw->mcast; 4777 qmem_free(rvu->dev, mcast->mce_ctx); 4778 qmem_free(rvu->dev, mcast->mcast_buf); 4779 mutex_destroy(&mcast->mce_lock); 4780 } 4781 } 4782 4783 void rvu_nix_freemem(struct rvu *rvu) 4784 { 4785 struct rvu_hwinfo *hw = rvu->hw; 4786 struct rvu_block *block; 4787 int blkaddr = 0; 4788 4789 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4790 while (blkaddr) { 4791 block = &hw->block[blkaddr]; 4792 rvu_nix_block_freemem(rvu, blkaddr, block); 4793 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 4794 } 4795 } 4796 4797 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 4798 struct msg_rsp *rsp) 4799 { 4800 u16 pcifunc = req->hdr.pcifunc; 4801 struct rvu_pfvf *pfvf; 4802 int nixlf, err; 4803 4804 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4805 if (err) 4806 return err; 4807 4808 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 4809 4810 npc_mcam_enable_flows(rvu, pcifunc); 4811 4812 pfvf = rvu_get_pfvf(rvu, pcifunc); 4813 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 4814 4815 rvu_switch_update_rules(rvu, pcifunc); 4816 4817 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 4818 } 4819 4820 int 
rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 4821 struct msg_rsp *rsp) 4822 { 4823 u16 pcifunc = req->hdr.pcifunc; 4824 struct rvu_pfvf *pfvf; 4825 int nixlf, err; 4826 4827 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 4828 if (err) 4829 return err; 4830 4831 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4832 4833 pfvf = rvu_get_pfvf(rvu, pcifunc); 4834 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4835 4836 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 4837 } 4838 4839 #define RX_SA_BASE GENMASK_ULL(52, 7) 4840 4841 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 4842 { 4843 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 4844 struct hwctx_disable_req ctx_req; 4845 int pf = rvu_get_pf(pcifunc); 4846 struct mac_ops *mac_ops; 4847 u8 cgx_id, lmac_id; 4848 u64 sa_base; 4849 void *cgxd; 4850 int err; 4851 4852 ctx_req.hdr.pcifunc = pcifunc; 4853 4854 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 4855 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 4856 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 4857 nix_interface_deinit(rvu, pcifunc, nixlf); 4858 nix_rx_sync(rvu, blkaddr); 4859 nix_txschq_free(rvu, pcifunc); 4860 4861 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 4862 4863 rvu_cgx_start_stop_io(rvu, pcifunc, false); 4864 4865 if (pfvf->sq_ctx) { 4866 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 4867 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4868 if (err) 4869 dev_err(rvu->dev, "SQ ctx disable failed\n"); 4870 } 4871 4872 if (pfvf->rq_ctx) { 4873 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 4874 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4875 if (err) 4876 dev_err(rvu->dev, "RQ ctx disable failed\n"); 4877 } 4878 4879 if (pfvf->cq_ctx) { 4880 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 4881 err = nix_lf_hwctx_disable(rvu, &ctx_req); 4882 if (err) 4883 dev_err(rvu->dev, "CQ ctx disable failed\n"); 4884 } 4885 4886 /* reset HW config done for Switch headers */ 4887 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, 4888 (PKIND_TX | PKIND_RX), 0, 0, 0, 0); 4889 4890 /* Disabling CGX and NPC config done for PTP */ 4891 if (pfvf->hw_rx_tstamp_en) { 4892 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 4893 cgxd = rvu_cgx_pdata(cgx_id, rvu); 4894 mac_ops = get_mac_ops(cgxd); 4895 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); 4896 /* Undo NPC config done for PTP */ 4897 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) 4898 dev_err(rvu->dev, "NPC config for PTP failed\n"); 4899 pfvf->hw_rx_tstamp_en = false; 4900 } 4901 4902 /* reset priority flow control config */ 4903 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); 4904 4905 /* reset 802.3x flow control config */ 4906 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); 4907 4908 nix_ctx_free(rvu, pfvf); 4909 4910 nix_free_all_bandprof(rvu, pcifunc); 4911 4912 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); 4913 if (FIELD_GET(RX_SA_BASE, sa_base)) { 4914 err = rvu_cpt_ctx_flush(rvu, pcifunc); 4915 if (err) 4916 dev_err(rvu->dev, 4917 "CPT ctx flush failed with error: %d\n", err); 4918 } 4919 } 4920 4921 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 4922 4923 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 4924 { 4925 struct rvu_hwinfo *hw = rvu->hw; 4926 struct rvu_block *block; 4927 int blkaddr, pf; 4928 int nixlf; 4929 u64 cfg; 4930 4931 pf = rvu_get_pf(pcifunc); 4932 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 4933 return 0; 4934 4935 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 
pcifunc); 4936 if (blkaddr < 0) 4937 return NIX_AF_ERR_AF_LF_INVALID; 4938 4939 block = &hw->block[blkaddr]; 4940 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 4941 if (nixlf < 0) 4942 return NIX_AF_ERR_AF_LF_INVALID; 4943 4944 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 4945 4946 if (enable) 4947 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 4948 else 4949 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 4950 4951 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 4952 4953 return 0; 4954 } 4955 4956 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 4957 struct msg_rsp *rsp) 4958 { 4959 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 4960 } 4961 4962 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 4963 struct msg_rsp *rsp) 4964 { 4965 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4966 } 4967 4968 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4969 struct nix_lso_format_cfg *req, 4970 struct nix_lso_format_cfg_rsp *rsp) 4971 { 4972 u16 pcifunc = req->hdr.pcifunc; 4973 struct nix_hw *nix_hw; 4974 struct rvu_pfvf *pfvf; 4975 int blkaddr, idx, f; 4976 u64 reg; 4977 4978 pfvf = rvu_get_pfvf(rvu, pcifunc); 4979 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4980 if (!pfvf->nixlf || blkaddr < 0) 4981 return NIX_AF_ERR_AF_LF_INVALID; 4982 4983 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4984 if (!nix_hw) 4985 return NIX_AF_ERR_INVALID_NIXBLK; 4986 4987 /* Find existing matching LSO format, if any */ 4988 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4989 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4990 reg = rvu_read64(rvu, blkaddr, 4991 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 4992 if (req->fields[f] != (reg & req->field_mask)) 4993 break; 4994 } 4995 4996 if (f == NIX_LSO_FIELD_MAX) 4997 break; 4998 } 4999 5000 if (idx < nix_hw->lso.in_use) { 5001 /* Match found */ 5002 rsp->lso_format_idx = idx; 5003 return 0; 5004 } 5005 5006 if (nix_hw->lso.in_use == nix_hw->lso.total) 5007 return NIX_AF_ERR_LSO_CFG_FAIL; 5008 5009 rsp->lso_format_idx = nix_hw->lso.in_use++; 5010 5011 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 5012 rvu_write64(rvu, blkaddr, 5013 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 5014 req->fields[f]); 5015 5016 return 0; 5017 } 5018 5019 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) 5020 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) 5021 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) 5022 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) 5023 5024 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) 5025 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) 5026 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) 5027 5028 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) 5029 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) 5030 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) 5031 5032 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, 5033 int blkaddr) 5034 { 5035 u8 cpt_idx, cpt_blkaddr; 5036 u64 val; 5037 5038 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 
0 : 1; 5039 if (req->enable) { 5040 val = 0; 5041 /* Enable context prefetching */ 5042 if (!is_rvu_otx2(rvu)) 5043 val |= BIT_ULL(51); 5044 5045 /* Set OPCODE and EGRP */ 5046 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); 5047 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); 5048 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); 5049 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); 5050 5051 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); 5052 5053 /* Set CPT queue for inline IPSec */ 5054 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); 5055 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, 5056 req->inst_qsel.cpt_pf_func); 5057 5058 if (!is_rvu_otx2(rvu)) { 5059 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : 5060 BLKADDR_CPT1; 5061 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); 5062 } 5063 5064 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5065 val); 5066 5067 /* Set CPT credit */ 5068 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5069 if ((val & 0x3FFFFF) != 0x3FFFFF) 5070 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5071 0x3FFFFF - val); 5072 5073 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); 5074 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); 5075 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); 5076 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); 5077 } else { 5078 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); 5079 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 5080 0x0); 5081 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); 5082 if ((val & 0x3FFFFF) != 0x3FFFFF) 5083 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 5084 0x3FFFFF - val); 5085 } 5086 } 5087 5088 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, 5089 struct nix_inline_ipsec_cfg *req, 5090 struct msg_rsp *rsp) 5091 { 5092 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5093 return 0; 5094 5095 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); 5096 if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) 5097 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); 5098 5099 return 0; 5100 } 5101 5102 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, 5103 struct msg_req *req, 5104 struct nix_inline_ipsec_cfg *rsp) 5105 5106 { 5107 u64 val; 5108 5109 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5110 return 0; 5111 5112 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); 5113 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); 5114 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); 5115 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); 5116 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); 5117 5118 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); 5119 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); 5120 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); 5121 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); 5122 5123 return 0; 5124 } 5125 5126 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, 5127 struct nix_inline_ipsec_lf_cfg *req, 5128 struct msg_rsp *rsp) 5129 { 5130 int lf, blkaddr, err; 5131 u64 val; 5132 5133 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) 5134 return 0; 5135 5136 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); 5137 if (err) 5138 return err; 5139 5140 if (req->enable) { 5141 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ 5142 val = (u64)req->ipsec_cfg0.tt << 44 | 5143 (u64)req->ipsec_cfg0.tag_const 
<< 20 | 5144 (u64)req->ipsec_cfg0.sa_pow2_size << 16 | 5145 req->ipsec_cfg0.lenm1_max; 5146 5147 if (blkaddr == BLKADDR_NIX1) 5148 val |= BIT_ULL(46); 5149 5150 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); 5151 5152 /* Set SA_IDX_W and SA_IDX_MAX */ 5153 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | 5154 req->ipsec_cfg1.sa_idx_max; 5155 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); 5156 5157 /* Set SA base address */ 5158 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5159 req->sa_base_addr); 5160 } else { 5161 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); 5162 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); 5163 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 5164 0x0); 5165 } 5166 5167 return 0; 5168 } 5169 5170 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 5171 { 5172 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 5173 5174 /* overwrite vf mac address with default_mac */ 5175 if (from_vf) 5176 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 5177 } 5178 5179 /* NIX ingress policers or bandwidth profiles APIs */ 5180 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 5181 { 5182 struct npc_lt_def_cfg defs, *ltdefs; 5183 5184 ltdefs = &defs; 5185 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 5186 5187 /* Extract PCP and DEI fields from outer VLAN from byte offset 5188 * 2 from the start of LB_PTR (ie TAG). 5189 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 5190 * fields are considered when 'Tunnel enable' is set in profile. 5191 */ 5192 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 5193 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 5194 (ltdefs->ovlan.ltype_match << 4) | 5195 ltdefs->ovlan.ltype_mask); 5196 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 5197 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 5198 (ltdefs->ivlan.ltype_match << 4) | 5199 ltdefs->ivlan.ltype_mask); 5200 5201 /* DSCP field in outer and tunneled IPv4 packets */ 5202 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 5203 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 5204 (ltdefs->rx_oip4.ltype_match << 4) | 5205 ltdefs->rx_oip4.ltype_mask); 5206 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 5207 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 5208 (ltdefs->rx_iip4.ltype_match << 4) | 5209 ltdefs->rx_iip4.ltype_mask); 5210 5211 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 5212 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 5213 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 5214 (ltdefs->rx_oip6.ltype_match << 4) | 5215 ltdefs->rx_oip6.ltype_mask); 5216 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 5217 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 5218 (ltdefs->rx_iip6.ltype_match << 4) | 5219 ltdefs->rx_iip6.ltype_mask); 5220 } 5221 5222 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 5223 int layer, int prof_idx) 5224 { 5225 struct nix_cn10k_aq_enq_req aq_req; 5226 int rc; 5227 5228 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5229 5230 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 5231 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5232 aq_req.op = NIX_AQ_INSTOP_INIT; 5233 5234 /* Context is all zeros, submit to AQ */ 5235 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5236 (struct nix_aq_enq_req *)&aq_req, NULL); 5237 if (rc) 5238 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 5239 layer, prof_idx); 5240 return rc; 5241 } 5242 5243 static int nix_setup_ipolicers(struct 
rvu *rvu, 5244 struct nix_hw *nix_hw, int blkaddr) 5245 { 5246 struct rvu_hwinfo *hw = rvu->hw; 5247 struct nix_ipolicer *ipolicer; 5248 int err, layer, prof_idx; 5249 u64 cfg; 5250 5251 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 5252 if (!(cfg & BIT_ULL(61))) { 5253 hw->cap.ipolicer = false; 5254 return 0; 5255 } 5256 5257 hw->cap.ipolicer = true; 5258 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 5259 sizeof(*ipolicer), GFP_KERNEL); 5260 if (!nix_hw->ipolicer) 5261 return -ENOMEM; 5262 5263 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 5264 5265 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5266 ipolicer = &nix_hw->ipolicer[layer]; 5267 switch (layer) { 5268 case BAND_PROF_LEAF_LAYER: 5269 ipolicer->band_prof.max = cfg & 0XFFFF; 5270 break; 5271 case BAND_PROF_MID_LAYER: 5272 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 5273 break; 5274 case BAND_PROF_TOP_LAYER: 5275 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 5276 break; 5277 } 5278 5279 if (!ipolicer->band_prof.max) 5280 continue; 5281 5282 err = rvu_alloc_bitmap(&ipolicer->band_prof); 5283 if (err) 5284 return err; 5285 5286 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 5287 ipolicer->band_prof.max, 5288 sizeof(u16), GFP_KERNEL); 5289 if (!ipolicer->pfvf_map) 5290 return -ENOMEM; 5291 5292 ipolicer->match_id = devm_kcalloc(rvu->dev, 5293 ipolicer->band_prof.max, 5294 sizeof(u16), GFP_KERNEL); 5295 if (!ipolicer->match_id) 5296 return -ENOMEM; 5297 5298 for (prof_idx = 0; 5299 prof_idx < ipolicer->band_prof.max; prof_idx++) { 5300 /* Set AF as current owner for INIT ops to succeed */ 5301 ipolicer->pfvf_map[prof_idx] = 0x00; 5302 5303 /* There is no enable bit in the profile context, 5304 * so no context disable. So let's INIT them here 5305 * so that PF/VF later on have to just do WRITE to 5306 * setup policer rates and config. 5307 */ 5308 err = nix_init_policer_context(rvu, nix_hw, 5309 layer, prof_idx); 5310 if (err) 5311 return err; 5312 } 5313 5314 /* Allocate memory for maintaining ref_counts for MID level 5315 * profiles, this will be needed for leaf layer profiles' 5316 * aggregation. 5317 */ 5318 if (layer != BAND_PROF_MID_LAYER) 5319 continue; 5320 5321 ipolicer->ref_count = devm_kcalloc(rvu->dev, 5322 ipolicer->band_prof.max, 5323 sizeof(u16), GFP_KERNEL); 5324 if (!ipolicer->ref_count) 5325 return -ENOMEM; 5326 } 5327 5328 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 5329 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 5330 5331 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 5332 5333 return 0; 5334 } 5335 5336 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) 5337 { 5338 struct nix_ipolicer *ipolicer; 5339 int layer; 5340 5341 if (!rvu->hw->cap.ipolicer) 5342 return; 5343 5344 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5345 ipolicer = &nix_hw->ipolicer[layer]; 5346 5347 if (!ipolicer->band_prof.max) 5348 continue; 5349 5350 kfree(ipolicer->band_prof.bmap); 5351 } 5352 } 5353 5354 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5355 struct nix_hw *nix_hw, u16 pcifunc) 5356 { 5357 struct nix_ipolicer *ipolicer; 5358 int layer, hi_layer, prof_idx; 5359 5360 /* Bits [15:14] in profile index represent layer */ 5361 layer = (req->qidx >> 14) & 0x03; 5362 prof_idx = req->qidx & 0x3FFF; 5363 5364 ipolicer = &nix_hw->ipolicer[layer]; 5365 if (prof_idx >= ipolicer->band_prof.max) 5366 return -EINVAL; 5367 5368 /* Check if the profile is allocated to the requesting PCIFUNC or not 5369 * with the exception of AF. 
AF is allowed to read and update contexts. 5370 */ 5371 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 5372 return -EINVAL; 5373 5374 /* If this profile is linked to higher layer profile then check 5375 * if that profile is also allocated to the requesting PCIFUNC 5376 * or not. 5377 */ 5378 if (!req->prof.hl_en) 5379 return 0; 5380 5381 /* Leaf layer profile can link only to mid layer and 5382 * mid layer to top layer. 5383 */ 5384 if (layer == BAND_PROF_LEAF_LAYER) 5385 hi_layer = BAND_PROF_MID_LAYER; 5386 else if (layer == BAND_PROF_MID_LAYER) 5387 hi_layer = BAND_PROF_TOP_LAYER; 5388 else 5389 return -EINVAL; 5390 5391 ipolicer = &nix_hw->ipolicer[hi_layer]; 5392 prof_idx = req->prof.band_prof_id; 5393 if (prof_idx >= ipolicer->band_prof.max || 5394 ipolicer->pfvf_map[prof_idx] != pcifunc) 5395 return -EINVAL; 5396 5397 return 0; 5398 } 5399 5400 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 5401 struct nix_bandprof_alloc_req *req, 5402 struct nix_bandprof_alloc_rsp *rsp) 5403 { 5404 int blkaddr, layer, prof, idx, err; 5405 u16 pcifunc = req->hdr.pcifunc; 5406 struct nix_ipolicer *ipolicer; 5407 struct nix_hw *nix_hw; 5408 5409 if (!rvu->hw->cap.ipolicer) 5410 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5411 5412 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5413 if (err) 5414 return err; 5415 5416 mutex_lock(&rvu->rsrc_lock); 5417 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5418 if (layer == BAND_PROF_INVAL_LAYER) 5419 continue; 5420 if (!req->prof_count[layer]) 5421 continue; 5422 5423 ipolicer = &nix_hw->ipolicer[layer]; 5424 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5425 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 5426 if (idx == MAX_BANDPROF_PER_PFFUNC) 5427 break; 5428 5429 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5430 if (prof < 0) 5431 break; 5432 rsp->prof_count[layer]++; 5433 rsp->prof_idx[layer][idx] = prof; 5434 ipolicer->pfvf_map[prof] = pcifunc; 5435 } 5436 } 5437 mutex_unlock(&rvu->rsrc_lock); 5438 return 0; 5439 } 5440 5441 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 5442 { 5443 int blkaddr, layer, prof_idx, err; 5444 struct nix_ipolicer *ipolicer; 5445 struct nix_hw *nix_hw; 5446 5447 if (!rvu->hw->cap.ipolicer) 5448 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5449 5450 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5451 if (err) 5452 return err; 5453 5454 mutex_lock(&rvu->rsrc_lock); 5455 /* Free all the profiles allocated to the PCIFUNC */ 5456 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5457 if (layer == BAND_PROF_INVAL_LAYER) 5458 continue; 5459 ipolicer = &nix_hw->ipolicer[layer]; 5460 5461 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 5462 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 5463 continue; 5464 5465 /* Clear ratelimit aggregation, if any */ 5466 if (layer == BAND_PROF_LEAF_LAYER && 5467 ipolicer->match_id[prof_idx]) 5468 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5469 5470 ipolicer->pfvf_map[prof_idx] = 0x00; 5471 ipolicer->match_id[prof_idx] = 0; 5472 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5473 } 5474 } 5475 mutex_unlock(&rvu->rsrc_lock); 5476 return 0; 5477 } 5478 5479 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 5480 struct nix_bandprof_free_req *req, 5481 struct msg_rsp *rsp) 5482 { 5483 int blkaddr, layer, prof_idx, idx, err; 5484 u16 pcifunc = req->hdr.pcifunc; 5485 struct nix_ipolicer *ipolicer; 5486 struct nix_hw *nix_hw; 5487 5488 if (req->free_all) 5489 return nix_free_all_bandprof(rvu, 
pcifunc); 5490 5491 if (!rvu->hw->cap.ipolicer) 5492 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5493 5494 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5495 if (err) 5496 return err; 5497 5498 mutex_lock(&rvu->rsrc_lock); 5499 /* Free the requested profile indices */ 5500 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5501 if (layer == BAND_PROF_INVAL_LAYER) 5502 continue; 5503 if (!req->prof_count[layer]) 5504 continue; 5505 5506 ipolicer = &nix_hw->ipolicer[layer]; 5507 for (idx = 0; idx < req->prof_count[layer]; idx++) { 5508 prof_idx = req->prof_idx[layer][idx]; 5509 if (prof_idx >= ipolicer->band_prof.max || 5510 ipolicer->pfvf_map[prof_idx] != pcifunc) 5511 continue; 5512 5513 /* Clear ratelimit aggregation, if any */ 5514 if (layer == BAND_PROF_LEAF_LAYER && 5515 ipolicer->match_id[prof_idx]) 5516 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 5517 5518 ipolicer->pfvf_map[prof_idx] = 0x00; 5519 ipolicer->match_id[prof_idx] = 0; 5520 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 5521 if (idx == MAX_BANDPROF_PER_PFFUNC) 5522 break; 5523 } 5524 } 5525 mutex_unlock(&rvu->rsrc_lock); 5526 return 0; 5527 } 5528 5529 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 5530 struct nix_cn10k_aq_enq_req *aq_req, 5531 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5532 u16 pcifunc, u8 ctype, u32 qidx) 5533 { 5534 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5535 aq_req->hdr.pcifunc = pcifunc; 5536 aq_req->ctype = ctype; 5537 aq_req->op = NIX_AQ_INSTOP_READ; 5538 aq_req->qidx = qidx; 5539 5540 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5541 (struct nix_aq_enq_req *)aq_req, 5542 (struct nix_aq_enq_rsp *)aq_rsp); 5543 } 5544 5545 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 5546 struct nix_hw *nix_hw, 5547 struct nix_cn10k_aq_enq_req *aq_req, 5548 struct nix_cn10k_aq_enq_rsp *aq_rsp, 5549 u32 leaf_prof, u16 mid_prof) 5550 { 5551 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5552 aq_req->hdr.pcifunc = 0x00; 5553 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 5554 aq_req->op = NIX_AQ_INSTOP_WRITE; 5555 aq_req->qidx = leaf_prof; 5556 5557 aq_req->prof.band_prof_id = mid_prof; 5558 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 5559 aq_req->prof.hl_en = 1; 5560 aq_req->prof_mask.hl_en = 1; 5561 5562 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5563 (struct nix_aq_enq_req *)aq_req, 5564 (struct nix_aq_enq_rsp *)aq_rsp); 5565 } 5566 5567 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 5568 u16 rq_idx, u16 match_id) 5569 { 5570 int leaf_prof, mid_prof, leaf_match; 5571 struct nix_cn10k_aq_enq_req aq_req; 5572 struct nix_cn10k_aq_enq_rsp aq_rsp; 5573 struct nix_ipolicer *ipolicer; 5574 struct nix_hw *nix_hw; 5575 int blkaddr, idx, rc; 5576 5577 if (!rvu->hw->cap.ipolicer) 5578 return 0; 5579 5580 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 5581 if (rc) 5582 return rc; 5583 5584 /* Fetch the RQ's context to see if policing is enabled */ 5585 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 5586 NIX_AQ_CTYPE_RQ, rq_idx); 5587 if (rc) { 5588 dev_err(rvu->dev, 5589 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 5590 __func__, rq_idx, pcifunc); 5591 return rc; 5592 } 5593 5594 if (!aq_rsp.rq.policer_ena) 5595 return 0; 5596 5597 /* Get the bandwidth profile ID mapped to this RQ */ 5598 leaf_prof = aq_rsp.rq.band_prof_id; 5599 5600 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 5601 ipolicer->match_id[leaf_prof] = match_id; 5602 5603 /* Check if any other leaf profile is marked with same match_id */ 5604 
for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 5605 if (idx == leaf_prof) 5606 continue; 5607 if (ipolicer->match_id[idx] != match_id) 5608 continue; 5609 5610 leaf_match = idx; 5611 break; 5612 } 5613 5614 if (idx == ipolicer->band_prof.max) 5615 return 0; 5616 5617 /* Fetch the matching profile's context to check if it's already 5618 * mapped to a mid level profile. 5619 */ 5620 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5621 NIX_AQ_CTYPE_BANDPROF, leaf_match); 5622 if (rc) { 5623 dev_err(rvu->dev, 5624 "%s: Failed to fetch context of leaf profile %d\n", 5625 __func__, leaf_match); 5626 return rc; 5627 } 5628 5629 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5630 if (aq_rsp.prof.hl_en) { 5631 /* Get Mid layer prof index and map leaf_prof index 5632 * also such that flows that are being steered 5633 * to different RQs and marked with same match_id 5634 * are rate limited in a aggregate fashion 5635 */ 5636 mid_prof = aq_rsp.prof.band_prof_id; 5637 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5638 &aq_req, &aq_rsp, 5639 leaf_prof, mid_prof); 5640 if (rc) { 5641 dev_err(rvu->dev, 5642 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5643 __func__, leaf_prof, mid_prof); 5644 goto exit; 5645 } 5646 5647 mutex_lock(&rvu->rsrc_lock); 5648 ipolicer->ref_count[mid_prof]++; 5649 mutex_unlock(&rvu->rsrc_lock); 5650 goto exit; 5651 } 5652 5653 /* Allocate a mid layer profile and 5654 * map both 'leaf_prof' and 'leaf_match' profiles to it. 5655 */ 5656 mutex_lock(&rvu->rsrc_lock); 5657 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 5658 if (mid_prof < 0) { 5659 dev_err(rvu->dev, 5660 "%s: Unable to allocate mid layer profile\n", __func__); 5661 mutex_unlock(&rvu->rsrc_lock); 5662 goto exit; 5663 } 5664 mutex_unlock(&rvu->rsrc_lock); 5665 ipolicer->pfvf_map[mid_prof] = 0x00; 5666 ipolicer->ref_count[mid_prof] = 0; 5667 5668 /* Initialize mid layer profile same as 'leaf_prof' */ 5669 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5670 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5671 if (rc) { 5672 dev_err(rvu->dev, 5673 "%s: Failed to fetch context of leaf profile %d\n", 5674 __func__, leaf_prof); 5675 goto exit; 5676 } 5677 5678 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 5679 aq_req.hdr.pcifunc = 0x00; 5680 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 5681 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 5682 aq_req.op = NIX_AQ_INSTOP_WRITE; 5683 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 5684 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); 5685 /* Clear higher layer enable bit in the mid profile, just in case */ 5686 aq_req.prof.hl_en = 0; 5687 aq_req.prof_mask.hl_en = 1; 5688 5689 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 5690 (struct nix_aq_enq_req *)&aq_req, NULL); 5691 if (rc) { 5692 dev_err(rvu->dev, 5693 "%s: Failed to INIT context of mid layer profile %d\n", 5694 __func__, mid_prof); 5695 goto exit; 5696 } 5697 5698 /* Map both leaf profiles to this mid layer profile */ 5699 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5700 &aq_req, &aq_rsp, 5701 leaf_prof, mid_prof); 5702 if (rc) { 5703 dev_err(rvu->dev, 5704 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5705 __func__, leaf_prof, mid_prof); 5706 goto exit; 5707 } 5708 5709 mutex_lock(&rvu->rsrc_lock); 5710 ipolicer->ref_count[mid_prof]++; 5711 mutex_unlock(&rvu->rsrc_lock); 5712 5713 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 5714 &aq_req, &aq_rsp, 5715 leaf_match, mid_prof); 5716 if (rc) { 5717 
dev_err(rvu->dev, 5718 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 5719 __func__, leaf_match, mid_prof); 5720 ipolicer->ref_count[mid_prof]--; 5721 goto exit; 5722 } 5723 5724 mutex_lock(&rvu->rsrc_lock); 5725 ipolicer->ref_count[mid_prof]++; 5726 mutex_unlock(&rvu->rsrc_lock); 5727 5728 exit: 5729 return rc; 5730 } 5731 5732 /* Called with mutex rsrc_lock */ 5733 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 5734 u32 leaf_prof) 5735 { 5736 struct nix_cn10k_aq_enq_req aq_req; 5737 struct nix_cn10k_aq_enq_rsp aq_rsp; 5738 struct nix_ipolicer *ipolicer; 5739 u16 mid_prof; 5740 int rc; 5741 5742 mutex_unlock(&rvu->rsrc_lock); 5743 5744 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 5745 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 5746 5747 mutex_lock(&rvu->rsrc_lock); 5748 if (rc) { 5749 dev_err(rvu->dev, 5750 "%s: Failed to fetch context of leaf profile %d\n", 5751 __func__, leaf_prof); 5752 return; 5753 } 5754 5755 if (!aq_rsp.prof.hl_en) 5756 return; 5757 5758 mid_prof = aq_rsp.prof.band_prof_id; 5759 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 5760 ipolicer->ref_count[mid_prof]--; 5761 /* If ref_count is zero, free mid layer profile */ 5762 if (!ipolicer->ref_count[mid_prof]) { 5763 ipolicer->pfvf_map[mid_prof] = 0x00; 5764 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 5765 } 5766 } 5767 5768 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, 5769 struct nix_bandprof_get_hwinfo_rsp *rsp) 5770 { 5771 struct nix_ipolicer *ipolicer; 5772 int blkaddr, layer, err; 5773 struct nix_hw *nix_hw; 5774 u64 tu; 5775 5776 if (!rvu->hw->cap.ipolicer) 5777 return NIX_AF_ERR_IPOLICER_NOTSUPP; 5778 5779 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); 5780 if (err) 5781 return err; 5782 5783 /* Return number of bandwidth profiles free at each layer */ 5784 mutex_lock(&rvu->rsrc_lock); 5785 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 5786 if (layer == BAND_PROF_INVAL_LAYER) 5787 continue; 5788 5789 ipolicer = &nix_hw->ipolicer[layer]; 5790 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); 5791 } 5792 mutex_unlock(&rvu->rsrc_lock); 5793 5794 /* Set the policer timeunit in nanosec */ 5795 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); 5796 rsp->policer_timeunit = (tu + 1) * 100; 5797 5798 return 0; 5799 } 5800