// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now MC resources are considered for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128
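/* With 256 HWVFs + 12 PFs there can be up to 268 receivers on the
 * broadcast replication list, hence the next supported table size,
 * 512 entries (MC_TBL_SZ_512), is used.
 */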
struct mce {
	struct hlist_node node;
	u16 pcifunc;
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}

	return 0;
}
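/* The 5-bit DWRR MTU field encoding, illustrated:
 *   value 10 -> BIT_ULL(10) = 1024 bytes
 *   value 4  -> 9728 bytes, value 5 -> 10240 bytes (reserved encodings)
 * The byte-to-field conversion below is lossy for sizes that are not
 * a power of two, e.g. 1500 bytes -> ilog2(1500) = 10 -> 1024 bytes.
 */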
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}

	return 0;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM; queues should be torn down after a
	 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
	 * an existing transaction might complete after the SW_SYNC
	 * operation. To ensure the operation is fully done, do the
	 * SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
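/* Each entry of txsch->pfvf_map packs the owning PF_FUNC in the low
 * 16 bits and queue flags above it (see the TXSCH_MAP_* macros in
 * rvu.h), so TXSCH_MAP_FUNC() used above extracts just the owner,
 * e.g. a map word of 0x00010204 decodes to flags 0x0001 and owner
 * PF_FUNC 0x0204.
 */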
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
								    rvu),
						      lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If the NIX1 block is present on the silicon then NIXes are
		 * assigned alternately for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packets on lbk link 1
		 * (which corresponds to LBK1); the same packet will be
		 * received on NIX1 over lbk link 0. If NIX1 sends a packet
		 * on lbk link 0 (which corresponds to LBK2), the packet will
		 * be received on NIX0 lbk link 1.
		 * But if the lbk links for NIX0 and NIX1 are swapped, i.e.
		 * NIX0 transmits and receives on lbk link 0, which
		 * corresponds to the LBK1 block, back to back connectivity
		 * between NIX and LBK can be achieved (which is similar to
		 * 96xx).
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled, the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
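	/* Pairing example (ignoring the NIX0/NIX1 link selection above):
	 * AF VF0 (vf = 0) gets rx_chan 0 and tx_chan 1, AF VF1 (vf = 1)
	 * gets rx_chan 1 and tx_chan 0, so each VF of a pair transmits
	 * straight into its partner's receive channel.
	 */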
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) +
					sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) +
					sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
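	/* Worked example for the CGX case below, assuming
	 * hw->lmac_per_cgx = 4 and lmac_chan_cnt = 16: cgx_id = 1,
	 * lmac_id = 2 and chan_base = 0 yield
	 * bpid = (1 * 4 * 16) + (2 * 16) + 0 = 96.
	 */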
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map each channel to the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
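/* A mailbox consumer can decode each rsp->chan_bpid word filled above
 * as (a sketch matching the packing in the loop):
 *
 *	chan = (rsp->chan_bpid[i] >> 10) & 0x7F;
 *	bpid = rsp->chan_bpid[i] & 0x3FF;
 *
 * e.g. channel 2 paired with bpid 96 is reported as
 * (2 << 10) | 96 = 2144.
 */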
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}
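/* The 0xFFF2 first/middle segment flag mask programmed above clears
 * FIN (bit 0), RST (bit 2) and PSH (bit 3) in every non-final TCP
 * segment while leaving SYN, ACK and URG intact:
 *   0xFFF2 = 1111 1111 1111 0010b
 */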
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
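/* Sizing example for the config above, assuming MAX_RSS_INDIR_TBL_SIZE
 * is 256: rss_sz = 256 and rss_grps = 8 give num_indices = 2048, so
 * the size field written to NIX_AF_LFX_RSS_CFG is
 * ilog2(2048 / 256) = 3, and group N starts at offset N * 256 with its
 * size field set to ilog2(256) - 1 = 7.
 */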
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose the first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses the same aq->res->base for updating the result of
	 * a previous instruction, hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}
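	/* For a WRITE op the cached enable state is updated with a
	 * read-modify-write below: bits covered by the mask take the
	 * requested value and the rest keep the bitmap's state, i.e.
	 * new_ena = (req_ena & mask_ena) | (old_ena & ~mask_ena).
	 */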
	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->rq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->rq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}
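/* Illustrative use (a sketch, not a caller from this file): to pull
 * down every enabled CQ of an LF, a handler would fill the request as
 *
 *	struct hwctx_disable_req req = { 0 };
 *
 *	req.hdr.pcifunc = pcifunc;
 *	req.ctype = NIX_AQ_CTYPE_CQ;
 *	err = nix_lf_hwctx_disable(rvu, &req);
 *
 * which walks cq_bmap and writes ena = 0 into each context via the AQ.
 */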
"PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1174 req->hdr.pcifunc, 1175 nix_get_ctx_name(req->ctype), req->qidx); 1176 return err; 1177 } 1178 1179 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1180 struct nix_aq_enq_req *req, 1181 struct nix_aq_enq_rsp *rsp) 1182 { 1183 int err; 1184 1185 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1186 if (!err) 1187 err = nix_lf_hwctx_lockdown(rvu, req); 1188 return err; 1189 } 1190 #else 1191 1192 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1193 struct nix_aq_enq_req *req, 1194 struct nix_aq_enq_rsp *rsp) 1195 { 1196 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1197 } 1198 #endif 1199 /* CN10K mbox handler */ 1200 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1201 struct nix_cn10k_aq_enq_req *req, 1202 struct nix_cn10k_aq_enq_rsp *rsp) 1203 { 1204 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1205 (struct nix_aq_enq_rsp *)rsp); 1206 } 1207 1208 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1209 struct hwctx_disable_req *req, 1210 struct msg_rsp *rsp) 1211 { 1212 return nix_lf_hwctx_disable(rvu, req); 1213 } 1214 1215 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1216 struct nix_lf_alloc_req *req, 1217 struct nix_lf_alloc_rsp *rsp) 1218 { 1219 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1220 struct rvu_hwinfo *hw = rvu->hw; 1221 u16 pcifunc = req->hdr.pcifunc; 1222 struct rvu_block *block; 1223 struct rvu_pfvf *pfvf; 1224 u64 cfg, ctx_cfg; 1225 int blkaddr; 1226 1227 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1228 return NIX_AF_ERR_PARAM; 1229 1230 if (req->way_mask) 1231 req->way_mask &= 0xFFFF; 1232 1233 pfvf = rvu_get_pfvf(rvu, pcifunc); 1234 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1235 if (!pfvf->nixlf || blkaddr < 0) 1236 return NIX_AF_ERR_AF_LF_INVALID; 1237 1238 block = &hw->block[blkaddr]; 1239 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1240 if (nixlf < 0) 1241 return NIX_AF_ERR_AF_LF_INVALID; 1242 1243 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1244 if (req->npa_func) { 1245 /* If default, use 'this' NIXLF's PFFUNC */ 1246 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1247 req->npa_func = pcifunc; 1248 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1249 return NIX_AF_INVAL_NPA_PF_FUNC; 1250 } 1251 1252 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1253 if (req->sso_func) { 1254 /* If default, use 'this' NIXLF's PFFUNC */ 1255 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1256 req->sso_func = pcifunc; 1257 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1258 return NIX_AF_INVAL_SSO_PF_FUNC; 1259 } 1260 1261 /* If RSS is being enabled, check if requested config is valid. 1262 * RSS table size should be power of two, otherwise 1263 * RSS_GRP::OFFSET + adder might go beyond that group or 1264 * won't be able to use entire table. 
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);
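	/* The TX_CFG value written below packs both TPIDs into one
	 * register: (0x8100 << 16) | 0x88A8 = 0x810088A8, i.e. VLAN1
	 * matches 802.1Q frames and VLAN0 matches 802.1AD frames.
	 */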
	/* Set up VLANX TPIDs.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
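/* Illustrative mailbox usage (a hypothetical PF-side sketch, not code
 * from this driver): a consumer typically requests matching queue
 * counts and a power-of-two RSS table, e.g.
 *
 *	req->rq_cnt = req->sq_cnt = req->cq_cnt = num_queues;
 *	req->rss_sz = 256;
 *	req->rss_grps = 1;
 *	req->npa_func = RVU_DEFAULT_PF_FUNC;
 *
 * and then reads queue/IRQ limits back from the nix_lf_alloc_rsp
 * fields populated above.
 */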
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}
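/* Layout of the mark format word built above: e.g. offset = 1,
 * y_mask = 0x3, y_val = 0x2, r_mask = 0x3, r_val = 0x3 packs to
 * (1 << 16) | (0x3 << 12) | (0x2 << 8) | (0x3 << 4) | 0x3 = 0x13233.
 */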
/* Handle shaper update specially for a few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = 0;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}
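/* In the fixed-mapping scheme checked above, a function's queue at a
 * given level is fully determined by its PF_FUNC:
 * schq = range_start + (pcifunc & RVU_PFVF_FUNC_MASK). For instance,
 * assuming nix_txsch_per_cgx_lmac = 16, link 2 owns queues 32-47 and
 * function 0x3 on that link maps to schq 35.
 */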
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}
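/* Illustrative request (a hypothetical consumer sketch): a PF
 * bringing up a fresh interface usually asks for one queue at each
 * level, e.g.
 *
 *	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
 *		req->schq[lvl] = 1;
 *
 * and reads the granted IDs back from rsp->schq_list[lvl][0].
 */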
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			    NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
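/* SMQ flush sequence, as implemented below:
 * 1. Force LMAC TX on so in-flight packets can drain.
 * 2. Set the flush and enqueue-xoff bits in NIX_AF_SMQX_CFG.
 * 3. Drop link RX backpressure so the flush cannot stall.
 * 4. Poll bit 49 until HW clears it, then restore CGX TX state.
 */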
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
			 int smq, u16 pcifunc, int nixlf)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
	return err;
}
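/* Teardown order below matters: link config and SW_XOFF are cleared
 * first so nothing new gets scheduled, SMQs are flushed next to drain
 * in-flight packets, and only then are the queues returned to the
 * free pool and NDC-TX synced.
 */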
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];

		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
		}
	}
	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
			  nix_get_tx_link(rvu, pcifunc));

	/* On PF cleanup, clear cfg done flag as
	 * PF would have changed default config.
	 */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
		schq = nix_get_tx_link(rvu, pcifunc);
		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
		 * VF might be using this TL1 queue
		 */
		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		rc = NIX_AF_ERR_TLX_INVALID;
		goto err;
	}

	/* Clear SW_XOFF of this resource only.
	 * For SMQ level, all path XOFF's
	 * need to be cleared by the user.
	 */
	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);

	/* Flush if it is an SMQ. The onus of disabling
	 * TL2/3 queue links before the SMQ flush is on the user.
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ &&
	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
		rc = NIX_AF_SMQ_FLUSH_FAILED;
		goto err;
	}

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);
	else
		return nix_txschq_free_one(rvu, req);
}
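/* PARENT registers carry the parent queue index in bits [24:16]
 * (hence the 0x1FF mask below). Both the queue being configured and
 * the parent it points at must belong to the requesting PF_FUNC, so
 * a function cannot graft its queues onto another function's
 * scheduling tree.
 */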
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}

static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring are not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}

static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes; on newer silicons
	 * it's changed to weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)

static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}

static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
			       u16 pcifunc, struct nix_txsch *txsch)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	lbk_link_start = hw->cgx_links;

	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with an NPC TX MCAM rule
		 */
		lbk_links = hw->lbk_links;
		while (lbk_links--)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							      lbk_link_start +
							      lbk_links),
				    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
	}
}
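/* Writes below are applied read-modify-write: bits set in
 * req->regval_mask are kept from the current register value, the
 * rest come from req->regval. E.g. regval_mask = ~GENMASK_ULL(11, 0)
 * updates only the low 12 bits of the target register.
 */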
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct nix_txschq_config *rsp)
{
	u64 reg, val, regval, schq_regbase, val_mask;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (req->read)
		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;
		val_mask = req->regval_mask[idx];

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring are supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		val = rvu_read64(rvu, blkaddr, reg);
		regval = (val & val_mask) | (regval & ~val_mask);

		/* Handle shaping state toggle specially */
		if (hw->cap.nix_shaper_toggle_wait &&
		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						req->lvl, reg, regval))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, hence split the register write so
		 * that the flush is triggered first and the rest of the
		 * bits are written later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
	return 0;
}

static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG Type 7 reserved for vf vlan */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}

static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}

static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u64 regval;
	int index;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

	regval = size ?
vtag : vtag << 32; 2548 2549 rvu_write64(rvu, blkaddr, 2550 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2551 rvu_write64(rvu, blkaddr, 2552 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2553 2554 return index; 2555 } 2556 2557 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2558 struct nix_vtag_config *req) 2559 { 2560 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2561 u16 pcifunc = req->hdr.pcifunc; 2562 int idx0 = req->tx.vtag0_idx; 2563 int idx1 = req->tx.vtag1_idx; 2564 struct nix_txvlan *vlan; 2565 int err = 0; 2566 2567 if (!nix_hw) 2568 return NIX_AF_ERR_INVALID_NIXBLK; 2569 2570 vlan = &nix_hw->txvlan; 2571 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2572 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2573 vlan->entry2pfvf_map[idx1] != pcifunc) 2574 return NIX_AF_ERR_PARAM; 2575 2576 mutex_lock(&vlan->rsrc_lock); 2577 2578 if (req->tx.free_vtag0) { 2579 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2580 if (err) 2581 goto exit; 2582 } 2583 2584 if (req->tx.free_vtag1) 2585 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2586 2587 exit: 2588 mutex_unlock(&vlan->rsrc_lock); 2589 return err; 2590 } 2591 2592 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2593 struct nix_vtag_config *req, 2594 struct nix_vtag_config_rsp *rsp) 2595 { 2596 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2597 struct nix_txvlan *vlan; 2598 u16 pcifunc = req->hdr.pcifunc; 2599 2600 if (!nix_hw) 2601 return NIX_AF_ERR_INVALID_NIXBLK; 2602 2603 vlan = &nix_hw->txvlan; 2604 if (req->tx.cfg_vtag0) { 2605 rsp->vtag0_idx = 2606 nix_tx_vtag_alloc(rvu, blkaddr, 2607 req->tx.vtag0, req->vtag_size); 2608 2609 if (rsp->vtag0_idx < 0) 2610 return NIX_AF_ERR_TX_VTAG_NOSPC; 2611 2612 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2613 } 2614 2615 if (req->tx.cfg_vtag1) { 2616 rsp->vtag1_idx = 2617 nix_tx_vtag_alloc(rvu, blkaddr, 2618 req->tx.vtag1, req->vtag_size); 2619 2620 if (rsp->vtag1_idx < 0) 2621 goto err_free; 2622 2623 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2624 } 2625 2626 return 0; 2627 2628 err_free: 2629 if (req->tx.cfg_vtag0) 2630 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2631 2632 return NIX_AF_ERR_TX_VTAG_NOSPC; 2633 } 2634 2635 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2636 struct nix_vtag_config *req, 2637 struct nix_vtag_config_rsp *rsp) 2638 { 2639 u16 pcifunc = req->hdr.pcifunc; 2640 int blkaddr, nixlf, err; 2641 2642 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2643 if (err) 2644 return err; 2645 2646 if (req->cfg_type) { 2647 /* rx vtag configuration */ 2648 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2649 if (err) 2650 return NIX_AF_ERR_PARAM; 2651 } else { 2652 /* tx vtag configuration */ 2653 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2654 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2655 return NIX_AF_ERR_PARAM; 2656 2657 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2658 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2659 2660 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2661 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2662 } 2663 2664 return 0; 2665 } 2666 2667 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2668 int mce, u8 op, u16 pcifunc, int next, bool eol) 2669 { 2670 struct nix_aq_enq_req aq_req; 2671 int err; 2672 2673 aq_req.hdr.pcifunc = 0; 2674 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2675 aq_req.op = op; 2676 aq_req.qidx = mce; 2677 2678 /* Use RSS with RSS index 0 */ 2679 aq_req.mce.op = 1; 2680 aq_req.mce.index = 0; 2681 aq_req.mce.eol = eol; 2682 aq_req.mce.pf_func = pcifunc; 
2683 aq_req.mce.next = next; 2684 2685 /* All fields valid */ 2686 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2687 2688 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2689 if (err) { 2690 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2691 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2692 return err; 2693 } 2694 return 0; 2695 } 2696 2697 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2698 u16 pcifunc, bool add) 2699 { 2700 struct mce *mce, *tail = NULL; 2701 bool delete = false; 2702 2703 /* Scan through the current list */ 2704 hlist_for_each_entry(mce, &mce_list->head, node) { 2705 /* If already exists, then delete */ 2706 if (mce->pcifunc == pcifunc && !add) { 2707 delete = true; 2708 break; 2709 } else if (mce->pcifunc == pcifunc && add) { 2710 /* entry already exists */ 2711 return 0; 2712 } 2713 tail = mce; 2714 } 2715 2716 if (delete) { 2717 hlist_del(&mce->node); 2718 kfree(mce); 2719 mce_list->count--; 2720 return 0; 2721 } 2722 2723 if (!add) 2724 return 0; 2725 2726 /* Add a new one to the list, at the tail */ 2727 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2728 if (!mce) 2729 return -ENOMEM; 2730 mce->pcifunc = pcifunc; 2731 if (!tail) 2732 hlist_add_head(&mce->node, &mce_list->head); 2733 else 2734 hlist_add_behind(&mce->node, &tail->node); 2735 mce_list->count++; 2736 return 0; 2737 } 2738 2739 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 2740 struct nix_mce_list *mce_list, 2741 int mce_idx, int mcam_index, bool add) 2742 { 2743 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 2744 struct npc_mcam *mcam = &rvu->hw->mcam; 2745 struct nix_mcast *mcast; 2746 struct nix_hw *nix_hw; 2747 struct mce *mce; 2748 2749 if (!mce_list) 2750 return -EINVAL; 2751 2752 /* Get this PF/VF func's MCE index */ 2753 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2754 2755 if (idx > (mce_idx + mce_list->max)) { 2756 dev_err(rvu->dev, 2757 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2758 __func__, idx, mce_list->max, 2759 pcifunc >> RVU_PFVF_PF_SHIFT); 2760 return -EINVAL; 2761 } 2762 2763 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 2764 if (err) 2765 return err; 2766 2767 mcast = &nix_hw->mcast; 2768 mutex_lock(&mcast->mce_lock); 2769 2770 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 2771 if (err) 2772 goto end; 2773 2774 /* Disable MCAM entry in NPC */ 2775 if (!mce_list->count) { 2776 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2777 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 2778 goto end; 2779 } 2780 2781 /* Dump the updated list to HW */ 2782 idx = mce_idx; 2783 last_idx = idx + mce_list->count - 1; 2784 hlist_for_each_entry(mce, &mce_list->head, node) { 2785 if (idx > last_idx) 2786 break; 2787 2788 next_idx = idx + 1; 2789 /* EOL should be set in last MCE */ 2790 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2791 mce->pcifunc, next_idx, 2792 (next_idx > last_idx) ? 
true : false); 2793 if (err) 2794 goto end; 2795 idx++; 2796 } 2797 2798 end: 2799 mutex_unlock(&mcast->mce_lock); 2800 return err; 2801 } 2802 2803 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 2804 struct nix_mce_list **mce_list, int *mce_idx) 2805 { 2806 struct rvu_hwinfo *hw = rvu->hw; 2807 struct rvu_pfvf *pfvf; 2808 2809 if (!hw->cap.nix_rx_multicast || 2810 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 2811 *mce_list = NULL; 2812 *mce_idx = 0; 2813 return; 2814 } 2815 2816 /* Get this PF/VF func's MCE index */ 2817 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2818 2819 if (type == NIXLF_BCAST_ENTRY) { 2820 *mce_list = &pfvf->bcast_mce_list; 2821 *mce_idx = pfvf->bcast_mce_idx; 2822 } else if (type == NIXLF_ALLMULTI_ENTRY) { 2823 *mce_list = &pfvf->mcast_mce_list; 2824 *mce_idx = pfvf->mcast_mce_idx; 2825 } else if (type == NIXLF_PROMISC_ENTRY) { 2826 *mce_list = &pfvf->promisc_mce_list; 2827 *mce_idx = pfvf->promisc_mce_idx; 2828 } else { 2829 *mce_list = NULL; 2830 *mce_idx = 0; 2831 } 2832 } 2833 2834 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 2835 int type, bool add) 2836 { 2837 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 2838 struct npc_mcam *mcam = &rvu->hw->mcam; 2839 struct rvu_hwinfo *hw = rvu->hw; 2840 struct nix_mce_list *mce_list; 2841 int pf; 2842 2843 /* skip multicast pkt replication for AF's VFs & SDP links */ 2844 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) 2845 return 0; 2846 2847 if (!hw->cap.nix_rx_multicast) 2848 return 0; 2849 2850 pf = rvu_get_pf(pcifunc); 2851 if (!is_pf_cgxmapped(rvu, pf)) 2852 return 0; 2853 2854 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2855 if (blkaddr < 0) 2856 return -EINVAL; 2857 2858 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2859 if (nixlf < 0) 2860 return -EINVAL; 2861 2862 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 2863 2864 mcam_index = npc_get_nixlf_mcam_index(mcam, 2865 pcifunc & ~RVU_PFVF_FUNC_MASK, 2866 nixlf, type); 2867 err = nix_update_mce_list(rvu, pcifunc, mce_list, 2868 mce_idx, mcam_index, add); 2869 return err; 2870 } 2871 2872 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2873 { 2874 struct nix_mcast *mcast = &nix_hw->mcast; 2875 int err, pf, numvfs, idx; 2876 struct rvu_pfvf *pfvf; 2877 u16 pcifunc; 2878 u64 cfg; 2879 2880 /* Skip PF0 (i.e AF) */ 2881 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2882 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2883 /* If PF is not enabled, nothing to do */ 2884 if (!((cfg >> 20) & 0x01)) 2885 continue; 2886 /* Get numVFs attached to this PF */ 2887 numvfs = (cfg >> 12) & 0xFF; 2888 2889 pfvf = &rvu->pf[pf]; 2890 2891 /* This NIX0/1 block mapped to PF ? 
		 */
		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
			continue;

		/* save start idx of broadcast mce list */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		/* save start idx of multicast mce list */
		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);

		/* save the start idx of promisc mce list */
		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to
			 * check whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->bcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to multicast mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->mcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;

			/* add dummy entries to promisc mce list */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->promisc_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}

static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;
	int err, size;

	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_mce_tables(rvu, nix_hw);
}
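/* NIX_TX_VTAG_DEF_MAX bounds the shared pool of TX VTAG default
 * entries; entry2pfvf_map below tracks per-entry ownership so that
 * nix_tx_vtag_free() can reject frees from non-owners.
 */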
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	int err;

	/* Allocate resource bitmap for tx vtag def registers */
	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
	err = rvu_alloc_bitmap(&vlan->rsrc);
	if (err)
		return -ENOMEM;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
					    sizeof(u16), GFP_KERNEL);
	if (!vlan->entry2pfvf_map)
		goto free_mem;

	mutex_init(&vlan->rsrc_lock);
	return 0;

free_mem:
	kfree(vlan->rsrc.bmap);
	return -ENOMEM;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}

	/* Setup a default value of 8192 as DWRR MTU */
	if (rvu->hw->cap.nix_common_dwrr_mtu) {
		rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
			    convert_bytes_to_dwrr_mtu(8192));
		rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
			    convert_bytes_to_dwrr_mtu(8192));
	}

	return 0;
}

int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int fmt_idx;

	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
			return fmt_idx;
	}
	if (fmt_idx >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;
	return fmt_idx;
}

static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
				    int blkaddr)
{
	u64 cfgs[] = {
		[NIX_MARK_CFG_IP_DSCP_RED]		= 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW]		= 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]	= 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED]		= 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW]		= 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]	= 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED]		= 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW]		= 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED]	= 0x30808,
	};
	int i, rc;
	u64 total;

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
					       GFP_KERNEL);
	if (!nix_hw->mark_format.cfg)
		return -ENOMEM;
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				rc, i);
	}

	return 0;
}
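/* Link MTU limits below are derived from FIFO geometry: the 72 KB
 * LBK buffer and 128 KB RPM FIFO found on CN10K allow a larger max
 * frame size than OcteonTx2, hence the per-silicon constants.
 */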
static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* CN10K supports LBK FIFO size 72 KB */
	if (rvu->hw->lbk_bufsize == 0x12000)
		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* RPM supports FIFO len 128 KB */
	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
				     struct nix_hw_info *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u64 dwrr_mtu;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);

	rsp->min_mtu = NIC_HW_MIN_FRS;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		/* Return '1' on OTx2 */
		rsp->rpm_dwrr_mtu = 1;
		rsp->sdp_dwrr_mtu = 1;
		return 0;
	}

	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}

int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}

/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}
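/* set_flowkey_fields() below compiles a NIX_FLOW_KEY_TYPE_* bitmask
 * into at most FIELDS_PER_ALG extractor descriptors. Illustrative
 * example (not an exhaustive trace): flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
 * NIX_FLOW_KEY_TYPE_TCP produces one field covering the 8 byte
 * SIP+DIP pair at key offset 0 and another for the 4 byte
 * Sport+Dport pair placed after it - the classic 4-tuple hash input.
 */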
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;
	int l4_key_offset = 0;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG	5
#define MAX_KEY_OFF	40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */

	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	for (idx = 0;
	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	     key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
		if (field_marker)
			memset(&tmp, 0, sizeof(tmp));

		field_marker = true;
		keyoff_marker = true;
		switch (key_type) {
		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field->bytesm1 = 1;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 9; /* offset */
			field->bytesm1 = 0; /* 1 byte */
			field->ltype_match = NPC_LT_LC_IP;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4:
		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP;
			}
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
			field->ltype_mask = 0xF; /* Match only IPv4 */
			keyoff_marker = false;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6:
		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP6;
			}
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
			field->ltype_mask = 0xF; /* Match only IPv6 */
			break;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
		case NIX_FLOW_KEY_TYPE_INNR_TCP:
		case NIX_FLOW_KEY_TYPE_INNR_UDP:
		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
			field->lid = NPC_LID_LD;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
				field->lid = NPC_LID_LH;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */

			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
			 * so no need to change the ltype_match, just change
			 * the lid for inner protocols
			 */
			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
				     (int)NPC_LT_LH_TU_TCP);
			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
				     (int)NPC_LT_LH_TU_UDP);
			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
				     (int)NPC_LT_LH_TU_SCTP);

			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
			    valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			}
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group item
				 * is enabled in the group but not the final one
				 */
				if (group_member) {
					valid_key = true;
					group_member = false;
				}
			} else {
				field_marker = false;
				keyoff_marker = false;
			}

			/* TCP/UDP/SCTP and ESP/AH fall at the same offset so
			 * remember the TCP key offset of 40 byte hash key.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
				l4_key_offset = key_off;
			break;
		case NIX_FLOW_KEY_TYPE_NVGRE:
			field->lid = NPC_LID_LD;
			field->hdr_offset = 4; /* VSID offset */
			field->bytesm1 = 2;
			field->ltype_match = NPC_LT_LD_NVGRE;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VXLAN:
		case NIX_FLOW_KEY_TYPE_GENEVE:
			field->lid = NPC_LID_LE;
			field->bytesm1 = 2;
			field->hdr_offset = 4;
			field->ltype_mask = 0xF;
			field_marker = false;
			keyoff_marker = false;

			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
				field->ltype_match |= NPC_LT_LE_VXLAN;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
				field->ltype_match |= NPC_LT_LE_GENEVE;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
				if (group_member) {
					field->ltype_mask = ~field->ltype_match;
					field_marker = true;
					keyoff_marker = true;
					valid_key = true;
					group_member = false;
				}
			}
			break;
		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
			field->lid = NPC_LID_LA;
			field->ltype_match = NPC_LT_LA_ETHER;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
				field->lid = NPC_LID_LF;
				field->ltype_match = NPC_LT_LF_TU_ETHER;
			}
			field->hdr_offset = 0;
			field->bytesm1 = 5; /* DMAC 6 Byte */
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr */
			field->ltype_match = NPC_LT_LC_IP6_EXT;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_GTPU:
			field->lid = NPC_LID_LE;
			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
			field->ltype_match = NPC_LT_LE_GTPU;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VLAN:
			field->lid = NPC_LID_LB;
			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
			field->ltype_match = NPC_LT_LB_CTAG;
			field->ltype_mask = 0xF;
			field->fn_mask = 1; /* Mask out the first nibble */
			break;
		case NIX_FLOW_KEY_TYPE_AH:
		case NIX_FLOW_KEY_TYPE_ESP:
			field->hdr_offset = 0;
			field->bytesm1 = 7; /* SPI + sequence number */
			field->ltype_mask = 0xF;
			field->lid = NPC_LID_LE;
			field->ltype_match = NPC_LT_LE_ESP;
			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
				field->lid = NPC_LID_LD;
				field->ltype_match = NPC_LT_LD_AH;
				field->hdr_offset = 4;
				keyoff_marker = false;
			}
			break;
		}
		field->ena = 1;

		/* Found a valid flow key type */
		if (valid_key) {
			/* Use the key offset of TCP/UDP/SCTP fields
			 * for ESP/AH fields.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
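/* reserve_flowkey_alg_idx() below is the only caller of
 * set_flowkey_fields() here: it writes the five descriptors into
 * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX() and records flow_cfg so that
 * get_flowkey_alg_idx() can de-duplicate identical requests from
 * later NIX_RSS_FLOWKEY_CFG mbox calls.
 */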
static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* No room to add new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}

int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}

static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc =
reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3567 if (rc < 0) 3568 return rc; 3569 3570 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3571 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3572 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3573 if (rc < 0) 3574 return rc; 3575 3576 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3577 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3578 NIX_FLOW_KEY_TYPE_UDP; 3579 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3580 if (rc < 0) 3581 return rc; 3582 3583 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3584 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3585 NIX_FLOW_KEY_TYPE_SCTP; 3586 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3587 if (rc < 0) 3588 return rc; 3589 3590 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3591 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3592 NIX_FLOW_KEY_TYPE_SCTP; 3593 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3594 if (rc < 0) 3595 return rc; 3596 3597 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3598 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3599 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3600 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3601 if (rc < 0) 3602 return rc; 3603 3604 return 0; 3605 } 3606 3607 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3608 struct nix_set_mac_addr *req, 3609 struct msg_rsp *rsp) 3610 { 3611 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3612 u16 pcifunc = req->hdr.pcifunc; 3613 int blkaddr, nixlf, err; 3614 struct rvu_pfvf *pfvf; 3615 3616 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3617 if (err) 3618 return err; 3619 3620 pfvf = rvu_get_pfvf(rvu, pcifunc); 3621 3622 /* untrusted VF can't overwrite admin(PF) changes */ 3623 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3624 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3625 dev_warn(rvu->dev, 3626 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3627 return -EPERM; 3628 } 3629 3630 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3631 3632 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3633 pfvf->rx_chan_base, req->mac_addr); 3634 3635 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 3636 ether_addr_copy(pfvf->default_mac, req->mac_addr); 3637 3638 rvu_switch_update_rules(rvu, pcifunc); 3639 3640 return 0; 3641 } 3642 3643 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 3644 struct msg_req *req, 3645 struct nix_get_mac_addr_rsp *rsp) 3646 { 3647 u16 pcifunc = req->hdr.pcifunc; 3648 struct rvu_pfvf *pfvf; 3649 3650 if (!is_nixlf_attached(rvu, pcifunc)) 3651 return NIX_AF_ERR_AF_LF_INVALID; 3652 3653 pfvf = rvu_get_pfvf(rvu, pcifunc); 3654 3655 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 3656 3657 return 0; 3658 } 3659 3660 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 3661 struct msg_rsp *rsp) 3662 { 3663 bool allmulti, promisc, nix_rx_multicast; 3664 u16 pcifunc = req->hdr.pcifunc; 3665 struct rvu_pfvf *pfvf; 3666 int nixlf, err; 3667 3668 pfvf = rvu_get_pfvf(rvu, pcifunc); 3669 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 3670 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 3671 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 3672 3673 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 3674 3675 if (is_vf(pcifunc) && !nix_rx_multicast && 3676 (promisc || allmulti)) { 3677 dev_warn_ratelimited(rvu->dev, 3678 "VF promisc/multicast not supported\n"); 3679 return 0; 3680 } 3681 3682 /* untrusted VF can't configure promisc/allmulti */ 3683 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3684 (promisc || allmulti)) 3685 return 0; 3686 3687 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3688 if (err) 3689 return err; 3690 3691 if (nix_rx_multicast) { 3692 /* add/del this PF_FUNC to/from mcast pkt replication list */ 3693 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 3694 allmulti); 3695 if (err) { 3696 dev_err(rvu->dev, 3697 "Failed to update pcifunc 0x%x to multicast list\n", 3698 pcifunc); 3699 return err; 3700 } 3701 3702 /* add/del this PF_FUNC to/from promisc pkt replication list */ 3703 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 3704 promisc); 3705 if (err) { 3706 dev_err(rvu->dev, 3707 "Failed to update pcifunc 0x%x to promisc list\n", 3708 pcifunc); 3709 return err; 3710 } 3711 } 3712 3713 /* install/uninstall allmulti entry */ 3714 if (allmulti) { 3715 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 3716 pfvf->rx_chan_base); 3717 } else { 3718 if (!nix_rx_multicast) 3719 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 3720 } 3721 3722 /* install/uninstall promisc entry */ 3723 if (promisc) { 3724 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3725 pfvf->rx_chan_base, 3726 pfvf->rx_chan_cnt); 3727 } else { 3728 if (!nix_rx_multicast) 3729 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3730 } 3731 3732 return 0; 3733 } 3734 3735 static void nix_find_link_frs(struct rvu *rvu, 3736 struct nix_frs_cfg *req, u16 pcifunc) 3737 { 3738 int pf = rvu_get_pf(pcifunc); 3739 struct rvu_pfvf *pfvf; 3740 int maxlen, minlen; 3741 int numvfs, hwvf; 3742 int vf; 3743 3744 /* Update with requester's min/max lengths */ 3745 pfvf = rvu_get_pfvf(rvu, pcifunc); 3746 pfvf->maxlen = req->maxlen; 3747 if (req->update_minlen) 3748 pfvf->minlen = req->minlen; 3749 3750 maxlen = req->maxlen; 3751 minlen = req->update_minlen ? 
				req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* For each VF, compare requested max/minlen */
	for (vf = 0; vf < numvfs; vf++) {
		pfvf = &rvu->hwvf[hwvf + vf];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;
	}

	/* Compare requested max/minlen with PF's max/minlen */
	pfvf = &rvu->pf[pf];
	if (pfvf->maxlen > maxlen)
		maxlen = pfvf->maxlen;
	if (req->update_minlen &&
	    pfvf->minlen && pfvf->minlen < minlen)
		minlen = pfvf->minlen;

	/* Update the request with max/min of the PF and its VFs */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
}

static int
nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
			u16 pcifunc, u64 tx_credits)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	unsigned long poll_tmo;
	bool restore_tx_en = 0;
	struct nix_hw *nix_hw;
	u64 cfg, sw_xoff = 0;
	u32 schq = 0;
	u32 credits;
	int rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (tx_credits == nix_hw->tx_credits[link])
		return 0;

	/* Enable cgx tx if disabled, so that credits come back */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	mutex_lock(&rvu->rsrc_lock);
	/* Disable new traffic to link */
	if (hw->cap.nix_shaping) {
		schq = nix_get_tx_link(rvu, pcifunc);
		sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
	}

	rc = -EBUSY;
	poll_tmo = jiffies + usecs_to_jiffies(10000);
	/* Wait for credits to return */
	do {
		if (time_after(jiffies, poll_tmo))
			goto exit;
		usleep_range(100, 200);

		cfg = rvu_read64(rvu, blkaddr,
				 NIX_AF_TX_LINKX_NORM_CREDIT(link));
		credits = (cfg >> 12) & 0xFFFFFULL;
	} while (credits != nix_hw->tx_credits[link]);

	cfg &= ~(0xFFFFFULL << 12);
	cfg |= (tx_credits << 12);
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
	rc = 0;

	nix_hw->tx_credits[link] = tx_credits;

exit:
	/* Enable traffic back */
	if (hw->cap.nix_shaping && !sw_xoff)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);

	/* Restore state of cgx tx */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);

	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	u8 cgx = 0, lmac = 0;
	u16 max_mtu;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	u8 cgx = 0, lmac = 0;
	u16 max_mtu;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &max_mtu);

	if (!req->sdp_link && req->maxlen > max_mtu)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if the requester wants to update the SMQs */
	if (!req->update_smq)
		goto rx_frscfg;

	/* Update min/maxlen in each of the SMQs attached to this PF/VF */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	mutex_lock(&rvu->rsrc_lock);
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
		if (req->update_minlen)
			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
	}
	mutex_unlock(&rvu->rsrc_lock);

rx_frscfg:
	/* Check if config is for the SDP link */
	if (req->sdp_link) {
		if (!hw->sdp_links)
			return NIX_AF_ERR_RX_LINK_INVALID;
		link = hw->cgx_links + hw->lbk_links;
		goto linkcfg;
	}

	/* Check if the request is from a CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		link = hw->cgx_links + pfvf->lbkid;
	}

	if (link < 0)
		return NIX_AF_ERR_RX_LINK_INVALID;

	nix_find_link_frs(rvu, req, pcifunc);

linkcfg:
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	if (req->sdp_link || pf == 0)
		return 0;

	/* Update transmit credits for CGX links */
	lmac_fifo_len =
		rvu_cgx_get_fifolen(rvu) /
		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
	return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
				       (lmac_fifo_len - req->maxlen) / 16);
}

int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
				    struct msg_rsp *rsp)
{
	int nixlf, blkaddr, err;
	u64 cfg;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
	/* Set the interface configuration */
	if (req->len_verify & BIT(0))
		cfg |= BIT_ULL(41);
	else
		cfg &= ~BIT_ULL(41);

	if (req->len_verify & BIT(1))
		cfg |= BIT_ULL(40);
	else
		cfg &= ~BIT_ULL(40);

	if (req->csum_verify & BIT(0))
		cfg |= BIT_ULL(37);
	else
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);

	return 0;
}

static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	/* CN10K supports a 72KB FIFO and a max packet size of 64KB */
	if (rvu->hw->lbk_bufsize == 0x12000)
		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;

	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
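/* Worked example for the credit arithmetic above, with an assumed
 * (hypothetical) LBK max frame size of 9216 bytes: the CN10K LBK FIFO is
 * 72KB (0x12000 = 73728 bytes), so the link gets
 * (73728 - 9216) / 16 = 4032 credits, i.e. one credit per 16 bytes of
 * FIFO left after reserving room for one maximum-sized frame.
 */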
static void nix_link_config(struct rvu *rvu, int blkaddr,
			    struct nix_hw *nix_hw)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;
	u16 lbk_max_frs, lmac_max_frs;
	u64 tx_credits, cfg;

	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
	 * as undersize and report them to SW as error pkts, hence
	 * setting it to 40 bytes.
	 */
	for (link = 0; link < hw->cgx_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
	}

	/* LBK links are numbered after the CGX links */
	for (link = hw->cgx_links;
	     link < hw->cgx_links + hw->lbk_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
	}
	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	/* Set credits for Tx links assuming the max packet length allowed.
	 * This will be reconfigured based on the MTU set for PF/VF.
	 */
	for (cgx = 0; cgx < hw->cgx; cgx++) {
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		/* Skip when cgx is not available or lmac cnt is zero */
		if (lmac_cnt <= 0)
			continue;
		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
			       lmac_max_frs) / 16;
		/* Enable credits and set credit pkt count to max allowed */
		cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		slink = cgx * hw->lmac_per_cgx;
		for (link = slink; link < (slink + lmac_cnt); link++) {
			nix_hw->tx_credits[link] = tx_credits;
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
		}
	}

	/* Set Tx credits for the LBK link */
	slink = hw->cgx_links;
	for (link = slink; link < (slink + hw->lbk_links); link++) {
		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
		nix_hw->tx_credits[link] = tx_credits;
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
	}
}
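/* Summary of the X2P calibration sequence implemented below (bit positions
 * as used in this file, not quoted from the HW spec): set the calibrate
 * bit (NIX_AF_CFG bit 9), poll NIX_AF_STATUS bit 10 for completion, check
 * the per-CGX ready bits (16 + cgx) and the LBK ready bit (19), then clear
 * the calibrate bit again.
 */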
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when the cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear the 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass the NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* The result structure can be followed by the RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Alloc sufficient result memory for all
	 * operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 hw_const;

	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* On OcteonTx2 the DWRR quantum is directly configured into each of
	 * the transmit scheduler queues, and PF/VF drivers were free to
	 * configure any value up to 2^24.
	 * On CN10K the HW is modified: the quantum configured at the
	 * scheduler queues is in terms of weight, and SW needs to set up a
	 * base DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW
	 * will do 'DWRR MTU * weight' to get the quantum.
	 *
	 * Check if HW uses a common MTU for all DWRR quantum configs.
	 * On OcteonTx2 this register field is '0'.
	 */
	if (((hw_const >> 56) & 0x10) == 0x10)
		hw->cap.nix_common_dwrr_mtu = true;
}

static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);

		/* Set chan/link to backpressure TL3 instead of TL2 */
		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and are transmitting pkts
		 * at the same time.
		 */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
		cfg &= ~BIT_ULL(15);
		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
	}

	ltdefs = rvu->kpu.lt_def;
	/* Calibrate the X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set up the capabilities of the NIX block */
	rvu_nix_setup_capabilities(rvu, blkaddr);

	/* Initialize the admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	if (is_block_implemented(hw, blkaddr)) {
		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_txvlan(rvu, nix_hw);
		if (err)
			return err;

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps the HW protocol checker to identify headers
		 * and validate lengths and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		if (!is_rvu_otx2(rvu)) {
			/* Enable APAD calculation for other protocols
			 * matching the APAD0 and APAD1 lt def registers.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
				    (ltdefs->rx_apad0.valid << 11) |
				    (ltdefs->rx_apad0.lid << 8) |
				    (ltdefs->rx_apad0.ltype_match << 4) |
				    ltdefs->rx_apad0.ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
				    (ltdefs->rx_apad1.valid << 11) |
				    (ltdefs->rx_apad1.lid << 8) |
				    (ltdefs->rx_apad1.ltype_match << 4) |
				    ltdefs->rx_apad1.ltype_mask);

			/* The receive ethertype definition register defines
			 * the layer information in NPC_RESULT_S used to
			 * identify the Ethertype location in the L2 header.
			 * It is used for Ethertype overwriting in inline
			 * IPsec flows.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
				    (ltdefs->rx_et[0].offset << 12) |
				    (ltdefs->rx_et[0].valid << 11) |
				    (ltdefs->rx_et[0].lid << 8) |
				    (ltdefs->rx_et[0].ltype_match << 4) |
				    ltdefs->rx_et[0].ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
				    (ltdefs->rx_et[1].offset << 12) |
				    (ltdefs->rx_et[1].valid << 11) |
				    (ltdefs->rx_et[1].lid << 8) |
				    (ltdefs->rx_et[1].ltype_match << 4) |
				    ltdefs->rx_et[1].ltype_mask);
		}

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
		if (!nix_hw->tx_credits)
			return -ENOMEM;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr, nix_hw);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	/* Walk all implemented NIX blocks and initialize each of them */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_switch_update_rules(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);
}

#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf;
	int nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}
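/* rvu_mbox_handler_nix_lf_ptp_tx_enable() above and the _disable() handler
 * below are thin wrappers around rvu_nix_lf_ptp_tx_cfg(): both resolve the
 * NIX LF from the mbox request header and toggle only the PTP_EN bit,
 * leaving the rest of NIX_AF_LFX_TX_CFG untouched.
 */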
int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Find an existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}

void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* Overwrite the VF mac address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}

/* NIX ingress policers or bandwidth profiles APIs */
static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
{
	struct npc_lt_def_cfg defs, *ltdefs;

	ltdefs = &defs;
	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));

	/* Extract the PCP and DEI fields from the outer VLAN, from byte
	 * offset 2 from the start of LB_PTR (i.e. TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
}

static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
				    int layer, int prof_idx)
{
	struct nix_cn10k_aq_enq_req aq_req;
	int rc;

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));

	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_INIT;

	/* Context is all zeros, submit to AQ */
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);
	return rc;
}

static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0xFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
			break;
		}

		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* There is no enable bit in the profile context,
			 * so there is no context disable either. INIT them
			 * here so that a PF/VF later on only has to do a
			 * WRITE to set up policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles; this will be needed for the leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set the policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}

static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_ipolicer *ipolicer;
	int layer;

	if (!rvu->hw->cap.ipolicer)
		return;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];

		if (!ipolicer->band_prof.max)
			continue;

		kfree(ipolicer->band_prof.bmap);
	}
}

static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in the profile index represent the layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC,
	 * with the exception of AF. AF is allowed to read and update
	 * contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to a higher layer profile, then check
	 * if that profile is also allocated to the requesting PCIFUNC.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* A leaf layer profile can link only to a mid layer profile, and
	 * a mid layer profile only to a top layer one.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;

			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with the same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it is already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map the 'leaf_prof'
		 * index to it as well, so that flows which are steered to
		 * different RQs but marked with the same match_id are rate
		 * limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize the mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear the higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}
/* Called with the rsrc_lock mutex held */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	/* Drop the lock across the AQ context read, then re-acquire it */
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If the ref_count dropped to zero, free the mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}

int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return the number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Report the policer timeunit in nanoseconds */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}
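/* Worked example for the timeunit conversion above: nix_setup_ipolicers()
 * programs NIX_AF_PL_TS with 19, so the reported
 * rsp->policer_timeunit = (19 + 1) * 100 = 2000 nsec, i.e. the 2us policer
 * timeunit noted there.
 */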