// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first nix block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct mac_ops *mac_ops;
	int pkind, pf, vf, lbkid;
	u8 cgx_id, lmac_id;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
								    rvu),
						      lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternately for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt, false);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}

	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);
	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
379 */ 380 switch (type) { 381 case NIX_INTF_TYPE_CGX: 382 if ((req->chan_base + req->chan_cnt) > 15) 383 return -EINVAL; 384 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 385 /* Assign bpid based on cgx, lmac and chan id */ 386 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 387 (lmac_id * lmac_chan_cnt) + req->chan_base; 388 389 if (req->bpid_per_chan) 390 bpid += chan_id; 391 if (bpid > cgx_bpid_cnt) 392 return -EINVAL; 393 break; 394 395 case NIX_INTF_TYPE_LBK: 396 if ((req->chan_base + req->chan_cnt) > 63) 397 return -EINVAL; 398 bpid = cgx_bpid_cnt + req->chan_base; 399 if (req->bpid_per_chan) 400 bpid += chan_id; 401 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 402 return -EINVAL; 403 break; 404 default: 405 return -EINVAL; 406 } 407 return bpid; 408 } 409 410 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 411 struct nix_bp_cfg_req *req, 412 struct nix_bp_cfg_rsp *rsp) 413 { 414 int blkaddr, pf, type, chan_id = 0; 415 u16 pcifunc = req->hdr.pcifunc; 416 struct rvu_pfvf *pfvf; 417 u16 chan_base, chan; 418 s16 bpid, bpid_base; 419 u64 cfg; 420 421 pf = rvu_get_pf(pcifunc); 422 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 423 424 /* Enable backpressure only for CGX mapped PFs and LBK interface */ 425 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 426 return 0; 427 428 pfvf = rvu_get_pfvf(rvu, pcifunc); 429 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 430 431 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 432 chan_base = pfvf->rx_chan_base + req->chan_base; 433 bpid = bpid_base; 434 435 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 436 if (bpid < 0) { 437 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 438 return -EINVAL; 439 } 440 441 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 442 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 443 cfg | (bpid & 0xFF) | BIT_ULL(16)); 444 chan_id++; 445 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 446 } 447 448 for (chan = 0; chan < req->chan_cnt; chan++) { 449 /* Map channel and bpid assign to it */ 450 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 451 (bpid_base & 0x3FF); 452 if (req->bpid_per_chan) 453 bpid_base++; 454 } 455 rsp->chan_cnt = req->chan_cnt; 456 457 return 0; 458 } 459 460 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 461 u64 format, bool v4, u64 *fidx) 462 { 463 struct nix_lso_format field = {0}; 464 465 /* IP's Length field */ 466 field.layer = NIX_TXLAYER_OL3; 467 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 468 field.offset = v4 ? 
2 : 4; 469 field.sizem1 = 1; /* i.e 2 bytes */ 470 field.alg = NIX_LSOALG_ADD_PAYLEN; 471 rvu_write64(rvu, blkaddr, 472 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 473 *(u64 *)&field); 474 475 /* No ID field in IPv6 header */ 476 if (!v4) 477 return; 478 479 /* IP's ID field */ 480 field.layer = NIX_TXLAYER_OL3; 481 field.offset = 4; 482 field.sizem1 = 1; /* i.e 2 bytes */ 483 field.alg = NIX_LSOALG_ADD_SEGNUM; 484 rvu_write64(rvu, blkaddr, 485 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 486 *(u64 *)&field); 487 } 488 489 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 490 u64 format, u64 *fidx) 491 { 492 struct nix_lso_format field = {0}; 493 494 /* TCP's sequence number field */ 495 field.layer = NIX_TXLAYER_OL4; 496 field.offset = 4; 497 field.sizem1 = 3; /* i.e 4 bytes */ 498 field.alg = NIX_LSOALG_ADD_OFFSET; 499 rvu_write64(rvu, blkaddr, 500 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 501 *(u64 *)&field); 502 503 /* TCP's flags field */ 504 field.layer = NIX_TXLAYER_OL4; 505 field.offset = 12; 506 field.sizem1 = 1; /* 2 bytes */ 507 field.alg = NIX_LSOALG_TCP_FLAGS; 508 rvu_write64(rvu, blkaddr, 509 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 510 *(u64 *)&field); 511 } 512 513 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 514 { 515 u64 cfg, idx, fidx = 0; 516 517 /* Get max HW supported format indices */ 518 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 519 nix_hw->lso.total = cfg; 520 521 /* Enable LSO */ 522 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 523 /* For TSO, set first and middle segment flags to 524 * mask out PSH, RST & FIN flags in TCP packet 525 */ 526 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 527 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 528 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 529 530 /* Setup default static LSO formats 531 * 532 * Configure format fields for TCPv4 segmentation offload 533 */ 534 idx = NIX_LSO_FORMAT_IDX_TSOV4; 535 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 536 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 537 538 /* Set rest of the fields to NOP */ 539 for (; fidx < 8; fidx++) { 540 rvu_write64(rvu, blkaddr, 541 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 542 } 543 nix_hw->lso.in_use++; 544 545 /* Configure format fields for TCPv6 segmentation offload */ 546 idx = NIX_LSO_FORMAT_IDX_TSOV6; 547 fidx = 0; 548 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 549 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 550 551 /* Set rest of the fields to NOP */ 552 for (; fidx < 8; fidx++) { 553 rvu_write64(rvu, blkaddr, 554 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 555 } 556 nix_hw->lso.in_use++; 557 } 558 559 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 560 { 561 kfree(pfvf->rq_bmap); 562 kfree(pfvf->sq_bmap); 563 kfree(pfvf->cq_bmap); 564 if (pfvf->rq_ctx) 565 qmem_free(rvu->dev, pfvf->rq_ctx); 566 if (pfvf->sq_ctx) 567 qmem_free(rvu->dev, pfvf->sq_ctx); 568 if (pfvf->cq_ctx) 569 qmem_free(rvu->dev, pfvf->cq_ctx); 570 if (pfvf->rss_ctx) 571 qmem_free(rvu->dev, pfvf->rss_ctx); 572 if (pfvf->nix_qints_ctx) 573 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 574 if (pfvf->cq_ints_ctx) 575 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 576 577 pfvf->rq_bmap = NULL; 578 pfvf->cq_bmap = NULL; 579 pfvf->sq_bmap = NULL; 580 pfvf->rq_ctx = NULL; 581 pfvf->sq_ctx = NULL; 582 pfvf->cq_ctx = NULL; 583 pfvf->rss_ctx = NULL; 584 pfvf->nix_qints_ctx = NULL; 585 pfvf->cq_ints_ctx = NULL; 586 } 587 588 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 589 struct rvu_pfvf *pfvf, int nixlf, 590 int rss_sz, int rss_grps, int hwctx_size, 591 u64 way_mask) 592 { 593 int err, grp, num_indices; 594 595 /* RSS is not requested for this NIXLF */ 596 if (!rss_sz) 597 return 0; 598 num_indices = rss_sz * rss_grps; 599 600 /* Alloc NIX RSS HW context memory and config the base */ 601 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 602 if (err) 603 return err; 604 605 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 606 (u64)pfvf->rss_ctx->iova); 607 608 /* Config full RSS table size, enable RSS and caching */ 609 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), 610 BIT_ULL(36) | BIT_ULL(4) | 611 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | 612 way_mask << 20); 613 /* Config RSS group offset and sizes */ 614 for (grp = 0; grp < rss_grps; grp++) 615 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 616 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 617 return 0; 618 } 619 620 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 621 struct nix_aq_inst_s *inst) 622 { 623 struct admin_queue *aq = block->aq; 624 struct nix_aq_res_s *result; 625 int timeout = 1000; 626 u64 reg, head; 627 628 result = (struct nix_aq_res_s *)aq->res->base; 629 630 /* Get current head pointer where to append this instruction */ 631 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 632 head = (reg >> 4) & AQ_PTR_MASK; 633 634 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 635 (void *)inst, aq->inst->entry_sz); 636 memset(result, 0, sizeof(*result)); 637 /* sync into memory */ 638 wmb(); 639 640 /* Ring the doorbell and wait for result */ 641 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 642 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 643 cpu_relax(); 644 udelay(1); 645 timeout--; 646 if (!timeout) 647 return -EBUSY; 648 } 649 650 if (result->compcode != NIX_AQ_COMP_GOOD) 651 /* TODO: Replace this with some error code */ 652 return -EBUSY; 653 654 return 0; 655 } 656 657 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 658 struct nix_aq_enq_req *req, 659 struct nix_aq_enq_rsp *rsp) 660 { 661 struct rvu_hwinfo *hw = rvu->hw; 662 u16 pcifunc = req->hdr.pcifunc; 663 int nixlf, blkaddr, rc = 0; 664 struct nix_aq_inst_s inst; 665 struct rvu_block *block; 666 struct admin_queue *aq; 667 struct rvu_pfvf *pfvf; 668 void *ctx, *mask; 669 bool ena; 670 u64 cfg; 671 672 blkaddr = nix_hw->blkaddr; 673 block = &hw->block[blkaddr]; 674 aq = block->aq; 675 if (!aq) { 676 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 677 return NIX_AF_ERR_AQ_ENQUEUE; 678 } 679 680 pfvf = rvu_get_pfvf(rvu, pcifunc); 681 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 682 683 /* Skip NIXLF check for broadcast MCE entry init */ 684 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { 685 if (!pfvf->nixlf || nixlf < 0) 686 return NIX_AF_ERR_AF_LF_INVALID; 687 } 688 689 switch (req->ctype) { 690 case NIX_AQ_CTYPE_RQ: 691 /* Check if index exceeds max no of queues */ 692 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 693 rc = NIX_AF_ERR_AQ_ENQUEUE; 694 break; 695 case NIX_AQ_CTYPE_SQ: 696 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 697 rc = NIX_AF_ERR_AQ_ENQUEUE; 698 break; 699 case NIX_AQ_CTYPE_CQ: 700 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 701 rc = NIX_AF_ERR_AQ_ENQUEUE; 702 break; 703 case NIX_AQ_CTYPE_RSS: 704 /* Check if RSS is enabled and qidx is within range */ 705 cfg = rvu_read64(rvu, blkaddr, 
NIX_AF_LFX_RSS_CFG(nixlf)); 706 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 707 (req->qidx >= (256UL << (cfg & 0xF)))) 708 rc = NIX_AF_ERR_AQ_ENQUEUE; 709 break; 710 case NIX_AQ_CTYPE_MCE: 711 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 712 713 /* Check if index exceeds MCE list length */ 714 if (!nix_hw->mcast.mce_ctx || 715 (req->qidx >= (256UL << (cfg & 0xF)))) 716 rc = NIX_AF_ERR_AQ_ENQUEUE; 717 718 /* Adding multicast lists for requests from PF/VFs is not 719 * yet supported, so ignore this. 720 */ 721 if (rsp) 722 rc = NIX_AF_ERR_AQ_ENQUEUE; 723 break; 724 default: 725 rc = NIX_AF_ERR_AQ_ENQUEUE; 726 } 727 728 if (rc) 729 return rc; 730 731 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 732 if (req->ctype == NIX_AQ_CTYPE_SQ && 733 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 734 (req->op == NIX_AQ_INSTOP_WRITE && 735 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 736 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 737 pcifunc, req->sq.smq)) 738 return NIX_AF_ERR_AQ_ENQUEUE; 739 } 740 741 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 742 inst.lf = nixlf; 743 inst.cindex = req->qidx; 744 inst.ctype = req->ctype; 745 inst.op = req->op; 746 /* Currently we are not supporting enqueuing multiple instructions, 747 * so always choose first entry in result memory. 748 */ 749 inst.res_addr = (u64)aq->res->iova; 750 751 /* Hardware uses same aq->res->base for updating result of 752 * previous instruction hence wait here till it is done. 753 */ 754 spin_lock(&aq->lock); 755 756 /* Clean result + context memory */ 757 memset(aq->res->base, 0, aq->res->entry_sz); 758 /* Context needs to be written at RES_ADDR + 128 */ 759 ctx = aq->res->base + 128; 760 /* Mask needs to be written at RES_ADDR + 256 */ 761 mask = aq->res->base + 256; 762 763 switch (req->op) { 764 case NIX_AQ_INSTOP_WRITE: 765 if (req->ctype == NIX_AQ_CTYPE_RQ) 766 memcpy(mask, &req->rq_mask, 767 sizeof(struct nix_rq_ctx_s)); 768 else if (req->ctype == NIX_AQ_CTYPE_SQ) 769 memcpy(mask, &req->sq_mask, 770 sizeof(struct nix_sq_ctx_s)); 771 else if (req->ctype == NIX_AQ_CTYPE_CQ) 772 memcpy(mask, &req->cq_mask, 773 sizeof(struct nix_cq_ctx_s)); 774 else if (req->ctype == NIX_AQ_CTYPE_RSS) 775 memcpy(mask, &req->rss_mask, 776 sizeof(struct nix_rsse_s)); 777 else if (req->ctype == NIX_AQ_CTYPE_MCE) 778 memcpy(mask, &req->mce_mask, 779 sizeof(struct nix_rx_mce_s)); 780 fallthrough; 781 case NIX_AQ_INSTOP_INIT: 782 if (req->ctype == NIX_AQ_CTYPE_RQ) 783 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 784 else if (req->ctype == NIX_AQ_CTYPE_SQ) 785 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 786 else if (req->ctype == NIX_AQ_CTYPE_CQ) 787 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 788 else if (req->ctype == NIX_AQ_CTYPE_RSS) 789 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 790 else if (req->ctype == NIX_AQ_CTYPE_MCE) 791 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 792 break; 793 case NIX_AQ_INSTOP_NOP: 794 case NIX_AQ_INSTOP_READ: 795 case NIX_AQ_INSTOP_LOCK: 796 case NIX_AQ_INSTOP_UNLOCK: 797 break; 798 default: 799 rc = NIX_AF_ERR_AQ_ENQUEUE; 800 spin_unlock(&aq->lock); 801 return rc; 802 } 803 804 /* Submit the instruction to AQ */ 805 rc = nix_aq_enqueue_wait(rvu, block, &inst); 806 if (rc) { 807 spin_unlock(&aq->lock); 808 return rc; 809 } 810 811 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 812 if (req->op == NIX_AQ_INSTOP_INIT) { 813 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 814 __set_bit(req->qidx, pfvf->rq_bmap); 815 if 
(req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 816 __set_bit(req->qidx, pfvf->sq_bmap); 817 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 818 __set_bit(req->qidx, pfvf->cq_bmap); 819 } 820 821 if (req->op == NIX_AQ_INSTOP_WRITE) { 822 if (req->ctype == NIX_AQ_CTYPE_RQ) { 823 ena = (req->rq.ena & req->rq_mask.ena) | 824 (test_bit(req->qidx, pfvf->rq_bmap) & 825 ~req->rq_mask.ena); 826 if (ena) 827 __set_bit(req->qidx, pfvf->rq_bmap); 828 else 829 __clear_bit(req->qidx, pfvf->rq_bmap); 830 } 831 if (req->ctype == NIX_AQ_CTYPE_SQ) { 832 ena = (req->rq.ena & req->sq_mask.ena) | 833 (test_bit(req->qidx, pfvf->sq_bmap) & 834 ~req->sq_mask.ena); 835 if (ena) 836 __set_bit(req->qidx, pfvf->sq_bmap); 837 else 838 __clear_bit(req->qidx, pfvf->sq_bmap); 839 } 840 if (req->ctype == NIX_AQ_CTYPE_CQ) { 841 ena = (req->rq.ena & req->cq_mask.ena) | 842 (test_bit(req->qidx, pfvf->cq_bmap) & 843 ~req->cq_mask.ena); 844 if (ena) 845 __set_bit(req->qidx, pfvf->cq_bmap); 846 else 847 __clear_bit(req->qidx, pfvf->cq_bmap); 848 } 849 } 850 851 if (rsp) { 852 /* Copy read context into mailbox */ 853 if (req->op == NIX_AQ_INSTOP_READ) { 854 if (req->ctype == NIX_AQ_CTYPE_RQ) 855 memcpy(&rsp->rq, ctx, 856 sizeof(struct nix_rq_ctx_s)); 857 else if (req->ctype == NIX_AQ_CTYPE_SQ) 858 memcpy(&rsp->sq, ctx, 859 sizeof(struct nix_sq_ctx_s)); 860 else if (req->ctype == NIX_AQ_CTYPE_CQ) 861 memcpy(&rsp->cq, ctx, 862 sizeof(struct nix_cq_ctx_s)); 863 else if (req->ctype == NIX_AQ_CTYPE_RSS) 864 memcpy(&rsp->rss, ctx, 865 sizeof(struct nix_rsse_s)); 866 else if (req->ctype == NIX_AQ_CTYPE_MCE) 867 memcpy(&rsp->mce, ctx, 868 sizeof(struct nix_rx_mce_s)); 869 } 870 } 871 872 spin_unlock(&aq->lock); 873 return 0; 874 } 875 876 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 877 struct nix_aq_enq_rsp *rsp) 878 { 879 struct nix_hw *nix_hw; 880 int blkaddr; 881 882 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 883 if (blkaddr < 0) 884 return NIX_AF_ERR_AF_LF_INVALID; 885 886 nix_hw = get_nix_hw(rvu->hw, blkaddr); 887 if (!nix_hw) 888 return -EINVAL; 889 890 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 891 } 892 893 static const char *nix_get_ctx_name(int ctype) 894 { 895 switch (ctype) { 896 case NIX_AQ_CTYPE_CQ: 897 return "CQ"; 898 case NIX_AQ_CTYPE_SQ: 899 return "SQ"; 900 case NIX_AQ_CTYPE_RQ: 901 return "RQ"; 902 case NIX_AQ_CTYPE_RSS: 903 return "RSS"; 904 } 905 return ""; 906 } 907 908 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 909 { 910 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 911 struct nix_aq_enq_req aq_req; 912 unsigned long *bmap; 913 int qidx, q_cnt = 0; 914 int err = 0, rc; 915 916 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 917 return NIX_AF_ERR_AQ_ENQUEUE; 918 919 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 920 aq_req.hdr.pcifunc = req->hdr.pcifunc; 921 922 if (req->ctype == NIX_AQ_CTYPE_CQ) { 923 aq_req.cq.ena = 0; 924 aq_req.cq_mask.ena = 1; 925 aq_req.cq.bp_ena = 0; 926 aq_req.cq_mask.bp_ena = 1; 927 q_cnt = pfvf->cq_ctx->qsize; 928 bmap = pfvf->cq_bmap; 929 } 930 if (req->ctype == NIX_AQ_CTYPE_SQ) { 931 aq_req.sq.ena = 0; 932 aq_req.sq_mask.ena = 1; 933 q_cnt = pfvf->sq_ctx->qsize; 934 bmap = pfvf->sq_bmap; 935 } 936 if (req->ctype == NIX_AQ_CTYPE_RQ) { 937 aq_req.rq.ena = 0; 938 aq_req.rq_mask.ena = 1; 939 q_cnt = pfvf->rq_ctx->qsize; 940 bmap = pfvf->rq_bmap; 941 } 942 943 aq_req.ctype = req->ctype; 944 aq_req.op = NIX_AQ_INSTOP_WRITE; 945 946 for (qidx = 0; qidx < q_cnt; 
qidx++) { 947 if (!test_bit(qidx, bmap)) 948 continue; 949 aq_req.qidx = qidx; 950 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 951 if (rc) { 952 err = rc; 953 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 954 nix_get_ctx_name(req->ctype), qidx); 955 } 956 } 957 958 return err; 959 } 960 961 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 962 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 963 { 964 struct nix_aq_enq_req lock_ctx_req; 965 int err; 966 967 if (req->op != NIX_AQ_INSTOP_INIT) 968 return 0; 969 970 if (req->ctype == NIX_AQ_CTYPE_MCE || 971 req->ctype == NIX_AQ_CTYPE_DYNO) 972 return 0; 973 974 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 975 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 976 lock_ctx_req.ctype = req->ctype; 977 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 978 lock_ctx_req.qidx = req->qidx; 979 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 980 if (err) 981 dev_err(rvu->dev, 982 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 983 req->hdr.pcifunc, 984 nix_get_ctx_name(req->ctype), req->qidx); 985 return err; 986 } 987 988 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 989 struct nix_aq_enq_req *req, 990 struct nix_aq_enq_rsp *rsp) 991 { 992 int err; 993 994 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 995 if (!err) 996 err = nix_lf_hwctx_lockdown(rvu, req); 997 return err; 998 } 999 #else 1000 1001 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1002 struct nix_aq_enq_req *req, 1003 struct nix_aq_enq_rsp *rsp) 1004 { 1005 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1006 } 1007 #endif 1008 /* CN10K mbox handler */ 1009 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1010 struct nix_cn10k_aq_enq_req *req, 1011 struct nix_cn10k_aq_enq_rsp *rsp) 1012 { 1013 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1014 (struct nix_aq_enq_rsp *)rsp); 1015 } 1016 1017 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1018 struct hwctx_disable_req *req, 1019 struct msg_rsp *rsp) 1020 { 1021 return nix_lf_hwctx_disable(rvu, req); 1022 } 1023 1024 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1025 struct nix_lf_alloc_req *req, 1026 struct nix_lf_alloc_rsp *rsp) 1027 { 1028 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1029 struct rvu_hwinfo *hw = rvu->hw; 1030 u16 pcifunc = req->hdr.pcifunc; 1031 struct rvu_block *block; 1032 struct rvu_pfvf *pfvf; 1033 u64 cfg, ctx_cfg; 1034 int blkaddr; 1035 1036 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1037 return NIX_AF_ERR_PARAM; 1038 1039 if (req->way_mask) 1040 req->way_mask &= 0xFFFF; 1041 1042 pfvf = rvu_get_pfvf(rvu, pcifunc); 1043 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1044 if (!pfvf->nixlf || blkaddr < 0) 1045 return NIX_AF_ERR_AF_LF_INVALID; 1046 1047 block = &hw->block[blkaddr]; 1048 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1049 if (nixlf < 0) 1050 return NIX_AF_ERR_AF_LF_INVALID; 1051 1052 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1053 if (req->npa_func) { 1054 /* If default, use 'this' NIXLF's PFFUNC */ 1055 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1056 req->npa_func = pcifunc; 1057 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1058 return NIX_AF_INVAL_NPA_PF_FUNC; 1059 } 1060 1061 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1062 if (req->sso_func) { 1063 /* If default, use 'this' NIXLF's PFFUNC */ 1064 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1065 req->sso_func = pcifunc; 1066 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1067 return NIX_AF_INVAL_SSO_PF_FUNC; 1068 } 
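
	/* Worked example of the RSS sizing rules enforced below (values are
	 * illustrative only): with rss_sz = 256 and rss_grps = 8 the LF gets
	 * a 256 * 8 = 2048 entry indirection table; nixlf_rss_ctx_init()
	 * then programs each NIX_AF_LFX_RSS_GRPX() with offset rss_sz * grp
	 * and a group size field of ilog2(256) - 1 = 7. A non power-of-two
	 * rss_sz would let RSS_GRP::OFFSET + adder run past its group, which
	 * is why such sizes are rejected.
	 */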
1069 1070 /* If RSS is being enabled, check if requested config is valid. 1071 * RSS table size should be power of two, otherwise 1072 * RSS_GRP::OFFSET + adder might go beyond that group or 1073 * won't be able to use entire table. 1074 */ 1075 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1076 !is_power_of_2(req->rss_sz))) 1077 return NIX_AF_ERR_RSS_SIZE_INVALID; 1078 1079 if (req->rss_sz && 1080 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1081 return NIX_AF_ERR_RSS_GRPS_INVALID; 1082 1083 /* Reset this NIX LF */ 1084 err = rvu_lf_reset(rvu, block, nixlf); 1085 if (err) { 1086 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1087 block->addr - BLKADDR_NIX0, nixlf); 1088 return NIX_AF_ERR_LF_RESET; 1089 } 1090 1091 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1092 1093 /* Alloc NIX RQ HW context memory and config the base */ 1094 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1095 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1096 if (err) 1097 goto free_mem; 1098 1099 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1100 if (!pfvf->rq_bmap) 1101 goto free_mem; 1102 1103 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1104 (u64)pfvf->rq_ctx->iova); 1105 1106 /* Set caching and queue count in HW */ 1107 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1108 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1109 1110 /* Alloc NIX SQ HW context memory and config the base */ 1111 hwctx_size = 1UL << (ctx_cfg & 0xF); 1112 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1113 if (err) 1114 goto free_mem; 1115 1116 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1117 if (!pfvf->sq_bmap) 1118 goto free_mem; 1119 1120 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1121 (u64)pfvf->sq_ctx->iova); 1122 1123 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1124 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1125 1126 /* Alloc NIX CQ HW context memory and config the base */ 1127 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1128 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1129 if (err) 1130 goto free_mem; 1131 1132 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1133 if (!pfvf->cq_bmap) 1134 goto free_mem; 1135 1136 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1137 (u64)pfvf->cq_ctx->iova); 1138 1139 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1140 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1141 1142 /* Initialize receive side scaling (RSS) */ 1143 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1144 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1145 req->rss_grps, hwctx_size, req->way_mask); 1146 if (err) 1147 goto free_mem; 1148 1149 /* Alloc memory for CQINT's HW contexts */ 1150 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1151 qints = (cfg >> 24) & 0xFFF; 1152 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1153 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1154 if (err) 1155 goto free_mem; 1156 1157 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1158 (u64)pfvf->cq_ints_ctx->iova); 1159 1160 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1161 BIT_ULL(36) | req->way_mask << 20); 1162 1163 /* Alloc memory for QINT's HW contexts */ 1164 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1165 qints = (cfg >> 12) & 0xFFF; 1166 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1167 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, 
qints, hwctx_size); 1168 if (err) 1169 goto free_mem; 1170 1171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1172 (u64)pfvf->nix_qints_ctx->iova); 1173 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1174 BIT_ULL(36) | req->way_mask << 20); 1175 1176 /* Setup VLANX TPID's. 1177 * Use VLAN1 for 802.1Q 1178 * and VLAN0 for 802.1AD. 1179 */ 1180 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1181 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1182 1183 /* Enable LMTST for this NIX LF */ 1184 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1185 1186 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1187 if (req->npa_func) 1188 cfg = req->npa_func; 1189 if (req->sso_func) 1190 cfg |= (u64)req->sso_func << 16; 1191 1192 cfg |= (u64)req->xqe_sz << 33; 1193 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1194 1195 /* Config Rx pkt length, csum checks and apad enable / disable */ 1196 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1197 1198 /* Configure pkind for TX parse config */ 1199 cfg = NPC_TX_DEF_PKIND; 1200 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1201 1202 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1203 err = nix_interface_init(rvu, pcifunc, intf, nixlf); 1204 if (err) 1205 goto free_mem; 1206 1207 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1208 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1209 1210 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1211 rvu_write64(rvu, blkaddr, 1212 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1213 VTAGSIZE_T4 | VTAG_STRIP); 1214 1215 goto exit; 1216 1217 free_mem: 1218 nix_ctx_free(rvu, pfvf); 1219 rc = -ENOMEM; 1220 1221 exit: 1222 /* Set macaddr of this PF/VF */ 1223 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1224 1225 /* set SQB size info */ 1226 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1227 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1228 rsp->rx_chan_base = pfvf->rx_chan_base; 1229 rsp->tx_chan_base = pfvf->tx_chan_base; 1230 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1231 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1232 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1233 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1234 /* Get HW supported stat count */ 1235 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1236 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1237 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1238 /* Get count of CQ IRQs and error IRQs supported per LF */ 1239 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1240 rsp->qints = ((cfg >> 12) & 0xFFF); 1241 rsp->cints = ((cfg >> 24) & 0xFFF); 1242 rsp->cgx_links = hw->cgx_links; 1243 rsp->lbk_links = hw->lbk_links; 1244 rsp->sdp_links = hw->sdp_links; 1245 1246 return rc; 1247 } 1248 1249 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1250 struct msg_rsp *rsp) 1251 { 1252 struct rvu_hwinfo *hw = rvu->hw; 1253 u16 pcifunc = req->hdr.pcifunc; 1254 struct rvu_block *block; 1255 int blkaddr, nixlf, err; 1256 struct rvu_pfvf *pfvf; 1257 1258 pfvf = rvu_get_pfvf(rvu, pcifunc); 1259 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1260 if (!pfvf->nixlf || blkaddr < 0) 1261 return NIX_AF_ERR_AF_LF_INVALID; 1262 1263 block = &hw->block[blkaddr]; 1264 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1265 if (nixlf < 0) 1266 return NIX_AF_ERR_AF_LF_INVALID; 1267 1268 if (req->flags & NIX_LF_DISABLE_FLOWS) 1269 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1270 else 1271 rvu_npc_free_mcam_entries(rvu, 
pcifunc, nixlf); 1272 1273 /* Free any tx vtag def entries used by this NIX LF */ 1274 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1275 nix_free_tx_vtag_entries(rvu, pcifunc); 1276 1277 nix_interface_deinit(rvu, pcifunc, nixlf); 1278 1279 /* Reset this NIX LF */ 1280 err = rvu_lf_reset(rvu, block, nixlf); 1281 if (err) { 1282 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1283 block->addr - BLKADDR_NIX0, nixlf); 1284 return NIX_AF_ERR_LF_RESET; 1285 } 1286 1287 nix_ctx_free(rvu, pfvf); 1288 1289 return 0; 1290 } 1291 1292 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1293 struct nix_mark_format_cfg *req, 1294 struct nix_mark_format_cfg_rsp *rsp) 1295 { 1296 u16 pcifunc = req->hdr.pcifunc; 1297 struct nix_hw *nix_hw; 1298 struct rvu_pfvf *pfvf; 1299 int blkaddr, rc; 1300 u32 cfg; 1301 1302 pfvf = rvu_get_pfvf(rvu, pcifunc); 1303 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1304 if (!pfvf->nixlf || blkaddr < 0) 1305 return NIX_AF_ERR_AF_LF_INVALID; 1306 1307 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1308 if (!nix_hw) 1309 return -EINVAL; 1310 1311 cfg = (((u32)req->offset & 0x7) << 16) | 1312 (((u32)req->y_mask & 0xF) << 12) | 1313 (((u32)req->y_val & 0xF) << 8) | 1314 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1315 1316 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1317 if (rc < 0) { 1318 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1319 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1320 return NIX_AF_ERR_MARK_CFG_FAIL; 1321 } 1322 1323 rsp->mark_format_idx = rc; 1324 return 0; 1325 } 1326 1327 /* Disable shaping of pkts by a scheduler queue 1328 * at a given scheduler level. 1329 */ 1330 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1331 int lvl, int schq) 1332 { 1333 u64 cir_reg = 0, pir_reg = 0; 1334 u64 cfg; 1335 1336 switch (lvl) { 1337 case NIX_TXSCH_LVL_TL1: 1338 cir_reg = NIX_AF_TL1X_CIR(schq); 1339 pir_reg = 0; /* PIR not available at TL1 */ 1340 break; 1341 case NIX_TXSCH_LVL_TL2: 1342 cir_reg = NIX_AF_TL2X_CIR(schq); 1343 pir_reg = NIX_AF_TL2X_PIR(schq); 1344 break; 1345 case NIX_TXSCH_LVL_TL3: 1346 cir_reg = NIX_AF_TL3X_CIR(schq); 1347 pir_reg = NIX_AF_TL3X_PIR(schq); 1348 break; 1349 case NIX_TXSCH_LVL_TL4: 1350 cir_reg = NIX_AF_TL4X_CIR(schq); 1351 pir_reg = NIX_AF_TL4X_PIR(schq); 1352 break; 1353 } 1354 1355 if (!cir_reg) 1356 return; 1357 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1358 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1359 1360 if (!pir_reg) 1361 return; 1362 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1363 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1364 } 1365 1366 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1367 int lvl, int schq) 1368 { 1369 struct rvu_hwinfo *hw = rvu->hw; 1370 int link; 1371 1372 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1373 return; 1374 1375 /* Reset TL4's SDP link config */ 1376 if (lvl == NIX_TXSCH_LVL_TL4) 1377 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1378 1379 if (lvl != NIX_TXSCH_LVL_TL2) 1380 return; 1381 1382 /* Reset TL2's CGX or LBK link config */ 1383 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1384 rvu_write64(rvu, blkaddr, 1385 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1386 } 1387 1388 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1389 { 1390 struct rvu_hwinfo *hw = rvu->hw; 1391 int pf = rvu_get_pf(pcifunc); 1392 u8 cgx_id = 0, lmac_id = 0; 1393 1394 if (is_afvf(pcifunc)) {/* LBK links */ 1395 return hw->cgx_links; 1396 } else if (is_pf_cgxmapped(rvu, 
pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate contiguous queue indices requested first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
1583 */ 1584 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1585 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1586 if (rc) 1587 goto err; 1588 } 1589 1590 /* Allocate requested Tx scheduler queues */ 1591 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1592 txsch = &nix_hw->txsch[lvl]; 1593 pfvf_map = txsch->pfvf_map; 1594 1595 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1596 continue; 1597 1598 rsp->schq[lvl] = req->schq[lvl]; 1599 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1600 1601 link = nix_get_tx_link(rvu, pcifunc); 1602 1603 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1604 start = link; 1605 end = link; 1606 } else if (hw->cap.nix_fixed_txschq_mapping) { 1607 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1608 } else { 1609 start = 0; 1610 end = txsch->schq.max; 1611 } 1612 1613 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1614 1615 /* Reset queue config */ 1616 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1617 schq = rsp->schq_contig_list[lvl][idx]; 1618 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1619 NIX_TXSCHQ_CFG_DONE)) 1620 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1621 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1622 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1623 } 1624 1625 for (idx = 0; idx < req->schq[lvl]; idx++) { 1626 schq = rsp->schq_list[lvl][idx]; 1627 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1628 NIX_TXSCHQ_CFG_DONE)) 1629 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1630 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1631 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1632 } 1633 } 1634 1635 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1636 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1637 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1638 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1639 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1640 goto exit; 1641 err: 1642 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1643 exit: 1644 mutex_unlock(&rvu->rsrc_lock); 1645 return rc; 1646 } 1647 1648 static void nix_smq_flush(struct rvu *rvu, int blkaddr, 1649 int smq, u16 pcifunc, int nixlf) 1650 { 1651 int pf = rvu_get_pf(pcifunc); 1652 u8 cgx_id = 0, lmac_id = 0; 1653 int err, restore_tx_en = 0; 1654 u64 cfg; 1655 1656 /* enable cgx tx if disabled */ 1657 if (is_pf_cgxmapped(rvu, pf)) { 1658 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1659 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1660 lmac_id, true); 1661 } 1662 1663 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 1664 /* Do SMQ flush and set enqueue xoff */ 1665 cfg |= BIT_ULL(50) | BIT_ULL(49); 1666 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 1667 1668 /* Disable backpressure from physical link, 1669 * otherwise SMQ flush may stall. 
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}

static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
1731 */ 1732 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1733 continue; 1734 1735 txsch = &nix_hw->txsch[lvl]; 1736 for (schq = 0; schq < txsch->schq.max; schq++) { 1737 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1738 continue; 1739 rvu_free_rsrc(&txsch->schq, schq); 1740 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1741 } 1742 } 1743 mutex_unlock(&rvu->rsrc_lock); 1744 1745 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 1746 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 1747 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 1748 if (err) 1749 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 1750 1751 return 0; 1752 } 1753 1754 static int nix_txschq_free_one(struct rvu *rvu, 1755 struct nix_txsch_free_req *req) 1756 { 1757 struct rvu_hwinfo *hw = rvu->hw; 1758 u16 pcifunc = req->hdr.pcifunc; 1759 int lvl, schq, nixlf, blkaddr; 1760 struct nix_txsch *txsch; 1761 struct nix_hw *nix_hw; 1762 u32 *pfvf_map; 1763 1764 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1765 if (blkaddr < 0) 1766 return NIX_AF_ERR_AF_LF_INVALID; 1767 1768 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1769 if (!nix_hw) 1770 return -EINVAL; 1771 1772 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1773 if (nixlf < 0) 1774 return NIX_AF_ERR_AF_LF_INVALID; 1775 1776 lvl = req->schq_lvl; 1777 schq = req->schq; 1778 txsch = &nix_hw->txsch[lvl]; 1779 1780 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 1781 return 0; 1782 1783 pfvf_map = txsch->pfvf_map; 1784 mutex_lock(&rvu->rsrc_lock); 1785 1786 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 1787 mutex_unlock(&rvu->rsrc_lock); 1788 goto err; 1789 } 1790 1791 /* Flush if it is a SMQ. Onus of disabling 1792 * TL2/3 queue links before SMQ flush is on user 1793 */ 1794 if (lvl == NIX_TXSCH_LVL_SMQ) 1795 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1796 1797 /* Free the resource */ 1798 rvu_free_rsrc(&txsch->schq, schq); 1799 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1800 mutex_unlock(&rvu->rsrc_lock); 1801 return 0; 1802 err: 1803 return NIX_AF_ERR_TLX_INVALID; 1804 } 1805 1806 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 1807 struct nix_txsch_free_req *req, 1808 struct msg_rsp *rsp) 1809 { 1810 if (req->flags & TXSCHQ_FREE_ALL) 1811 return nix_txschq_free(rvu, req->hdr.pcifunc); 1812 else 1813 return nix_txschq_free_one(rvu, req); 1814 } 1815 1816 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 1817 int lvl, u64 reg, u64 regval) 1818 { 1819 u64 regbase = reg & 0xFFFF; 1820 u16 schq, parent; 1821 1822 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 1823 return false; 1824 1825 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1826 /* Check if this schq belongs to this PF/VF or not */ 1827 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 1828 return false; 1829 1830 parent = (regval >> 16) & 0x1FF; 1831 /* Validate MDQ's TL4 parent */ 1832 if (regbase == NIX_AF_MDQX_PARENT(0) && 1833 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 1834 return false; 1835 1836 /* Validate TL4's TL3 parent */ 1837 if (regbase == NIX_AF_TL4X_PARENT(0) && 1838 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 1839 return false; 1840 1841 /* Validate TL3's TL2 parent */ 1842 if (regbase == NIX_AF_TL3X_PARENT(0) && 1843 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 1844 return false; 1845 1846 /* Validate TL2's TL1 parent */ 1847 if (regbase == NIX_AF_TL2X_PARENT(0) && 
1848 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 1849 return false; 1850 1851 return true; 1852 } 1853 1854 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 1855 { 1856 u64 regbase; 1857 1858 if (hw->cap.nix_shaping) 1859 return true; 1860 1861 /* If shaping and coloring is not supported, then 1862 * *_CIR and *_PIR registers should not be configured. 1863 */ 1864 regbase = reg & 0xFFFF; 1865 1866 switch (lvl) { 1867 case NIX_TXSCH_LVL_TL1: 1868 if (regbase == NIX_AF_TL1X_CIR(0)) 1869 return false; 1870 break; 1871 case NIX_TXSCH_LVL_TL2: 1872 if (regbase == NIX_AF_TL2X_CIR(0) || 1873 regbase == NIX_AF_TL2X_PIR(0)) 1874 return false; 1875 break; 1876 case NIX_TXSCH_LVL_TL3: 1877 if (regbase == NIX_AF_TL3X_CIR(0) || 1878 regbase == NIX_AF_TL3X_PIR(0)) 1879 return false; 1880 break; 1881 case NIX_TXSCH_LVL_TL4: 1882 if (regbase == NIX_AF_TL4X_CIR(0) || 1883 regbase == NIX_AF_TL4X_PIR(0)) 1884 return false; 1885 break; 1886 } 1887 return true; 1888 } 1889 1890 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 1891 u16 pcifunc, int blkaddr) 1892 { 1893 u32 *pfvf_map; 1894 int schq; 1895 1896 schq = nix_get_tx_link(rvu, pcifunc); 1897 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 1898 /* Skip if PF has already done the config */ 1899 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 1900 return; 1901 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 1902 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 1903 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 1904 TXSCH_TL1_DFLT_RR_QTM); 1905 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 1906 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 1907 } 1908 1909 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 1910 struct nix_txschq_config *req, 1911 struct msg_rsp *rsp) 1912 { 1913 struct rvu_hwinfo *hw = rvu->hw; 1914 u16 pcifunc = req->hdr.pcifunc; 1915 u64 reg, regval, schq_regbase; 1916 struct nix_txsch *txsch; 1917 struct nix_hw *nix_hw; 1918 int blkaddr, idx, err; 1919 int nixlf, schq; 1920 u32 *pfvf_map; 1921 1922 if (req->lvl >= NIX_TXSCH_LVL_CNT || 1923 req->num_regs > MAX_REGS_PER_MBOX_MSG) 1924 return NIX_AF_INVAL_TXSCHQ_CFG; 1925 1926 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1927 if (err) 1928 return err; 1929 1930 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1931 if (!nix_hw) 1932 return -EINVAL; 1933 1934 txsch = &nix_hw->txsch[req->lvl]; 1935 pfvf_map = txsch->pfvf_map; 1936 1937 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 1938 pcifunc & RVU_PFVF_FUNC_MASK) { 1939 mutex_lock(&rvu->rsrc_lock); 1940 if (req->lvl == NIX_TXSCH_LVL_TL1) 1941 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 1942 mutex_unlock(&rvu->rsrc_lock); 1943 return 0; 1944 } 1945 1946 for (idx = 0; idx < req->num_regs; idx++) { 1947 reg = req->reg[idx]; 1948 regval = req->regval[idx]; 1949 schq_regbase = reg & 0xFFFF; 1950 1951 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 1952 txsch->lvl, reg, regval)) 1953 return NIX_AF_INVAL_TXSCHQ_CFG; 1954 1955 /* Check if shaping and coloring is supported */ 1956 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 1957 continue; 1958 1959 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 1960 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 1961 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 1962 pcifunc, 0); 1963 regval &= ~(0x7FULL << 24); 1964 regval |= ((u64)nixlf << 24); 1965 } 1966 1967 /* Clear 'BP_ENA' config, if it's not allowed */ 1968 if (!hw->cap.nix_tx_link_bp) { 1969 if (schq_regbase == 
NIX_AF_TL4X_SDP_LINK_CFG(0) || 1970 (schq_regbase & 0xFF00) == 1971 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 1972 regval &= ~BIT_ULL(13); 1973 } 1974 1975 /* Mark config as done for TL1 by PF */ 1976 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 1977 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 1978 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1979 mutex_lock(&rvu->rsrc_lock); 1980 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 1981 NIX_TXSCHQ_CFG_DONE); 1982 mutex_unlock(&rvu->rsrc_lock); 1983 } 1984 1985 /* SMQ flush is special hence split register writes such 1986 * that flush first and write rest of the bits later. 1987 */ 1988 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 1989 (regval & BIT_ULL(49))) { 1990 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1991 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1992 regval &= ~BIT_ULL(49); 1993 } 1994 rvu_write64(rvu, blkaddr, reg, regval); 1995 } 1996 1997 return 0; 1998 } 1999 2000 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2001 struct nix_vtag_config *req) 2002 { 2003 u64 regval = req->vtag_size; 2004 2005 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2006 req->vtag_size > VTAGSIZE_T8) 2007 return -EINVAL; 2008 2009 /* RX VTAG Type 7 reserved for vf vlan */ 2010 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2011 return NIX_AF_ERR_RX_VTAG_INUSE; 2012 2013 if (req->rx.capture_vtag) 2014 regval |= BIT_ULL(5); 2015 if (req->rx.strip_vtag) 2016 regval |= BIT_ULL(4); 2017 2018 rvu_write64(rvu, blkaddr, 2019 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2020 return 0; 2021 } 2022 2023 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2024 u16 pcifunc, int index) 2025 { 2026 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2027 struct nix_txvlan *vlan = &nix_hw->txvlan; 2028 2029 if (vlan->entry2pfvf_map[index] != pcifunc) 2030 return NIX_AF_ERR_PARAM; 2031 2032 rvu_write64(rvu, blkaddr, 2033 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2034 rvu_write64(rvu, blkaddr, 2035 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2036 2037 vlan->entry2pfvf_map[index] = 0; 2038 rvu_free_rsrc(&vlan->rsrc, index); 2039 2040 return 0; 2041 } 2042 2043 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2044 { 2045 struct nix_txvlan *vlan; 2046 struct nix_hw *nix_hw; 2047 int index, blkaddr; 2048 2049 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2050 if (blkaddr < 0) 2051 return; 2052 2053 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2054 vlan = &nix_hw->txvlan; 2055 2056 mutex_lock(&vlan->rsrc_lock); 2057 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2058 for (index = 0; index < vlan->rsrc.max; index++) { 2059 if (vlan->entry2pfvf_map[index] == pcifunc) 2060 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2061 } 2062 mutex_unlock(&vlan->rsrc_lock); 2063 } 2064 2065 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2066 u64 vtag, u8 size) 2067 { 2068 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2069 struct nix_txvlan *vlan = &nix_hw->txvlan; 2070 u64 regval; 2071 int index; 2072 2073 mutex_lock(&vlan->rsrc_lock); 2074 2075 index = rvu_alloc_rsrc(&vlan->rsrc); 2076 if (index < 0) { 2077 mutex_unlock(&vlan->rsrc_lock); 2078 return index; 2079 } 2080 2081 mutex_unlock(&vlan->rsrc_lock); 2082 2083 regval = size ? 
vtag : vtag << 32; 2084 2085 rvu_write64(rvu, blkaddr, 2086 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2087 rvu_write64(rvu, blkaddr, 2088 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2089 2090 return index; 2091 } 2092 2093 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2094 struct nix_vtag_config *req) 2095 { 2096 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2097 struct nix_txvlan *vlan = &nix_hw->txvlan; 2098 u16 pcifunc = req->hdr.pcifunc; 2099 int idx0 = req->tx.vtag0_idx; 2100 int idx1 = req->tx.vtag1_idx; 2101 int err = 0; 2102 2103 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2104 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2105 vlan->entry2pfvf_map[idx1] != pcifunc) 2106 return NIX_AF_ERR_PARAM; 2107 2108 mutex_lock(&vlan->rsrc_lock); 2109 2110 if (req->tx.free_vtag0) { 2111 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2112 if (err) 2113 goto exit; 2114 } 2115 2116 if (req->tx.free_vtag1) 2117 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2118 2119 exit: 2120 mutex_unlock(&vlan->rsrc_lock); 2121 return err; 2122 } 2123 2124 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2125 struct nix_vtag_config *req, 2126 struct nix_vtag_config_rsp *rsp) 2127 { 2128 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2129 struct nix_txvlan *vlan = &nix_hw->txvlan; 2130 u16 pcifunc = req->hdr.pcifunc; 2131 2132 if (req->tx.cfg_vtag0) { 2133 rsp->vtag0_idx = 2134 nix_tx_vtag_alloc(rvu, blkaddr, 2135 req->tx.vtag0, req->vtag_size); 2136 2137 if (rsp->vtag0_idx < 0) 2138 return NIX_AF_ERR_TX_VTAG_NOSPC; 2139 2140 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2141 } 2142 2143 if (req->tx.cfg_vtag1) { 2144 rsp->vtag1_idx = 2145 nix_tx_vtag_alloc(rvu, blkaddr, 2146 req->tx.vtag1, req->vtag_size); 2147 2148 if (rsp->vtag1_idx < 0) 2149 goto err_free; 2150 2151 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2152 } 2153 2154 return 0; 2155 2156 err_free: 2157 if (req->tx.cfg_vtag0) 2158 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2159 2160 return NIX_AF_ERR_TX_VTAG_NOSPC; 2161 } 2162 2163 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2164 struct nix_vtag_config *req, 2165 struct nix_vtag_config_rsp *rsp) 2166 { 2167 u16 pcifunc = req->hdr.pcifunc; 2168 int blkaddr, nixlf, err; 2169 2170 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2171 if (err) 2172 return err; 2173 2174 if (req->cfg_type) { 2175 /* rx vtag configuration */ 2176 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2177 if (err) 2178 return NIX_AF_ERR_PARAM; 2179 } else { 2180 /* tx vtag configuration */ 2181 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2182 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2183 return NIX_AF_ERR_PARAM; 2184 2185 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2186 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2187 2188 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2189 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2190 } 2191 2192 return 0; 2193 } 2194 2195 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2196 int mce, u8 op, u16 pcifunc, int next, bool eol) 2197 { 2198 struct nix_aq_enq_req aq_req; 2199 int err; 2200 2201 aq_req.hdr.pcifunc = 0; 2202 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2203 aq_req.op = op; 2204 aq_req.qidx = mce; 2205 2206 /* Forward bcast pkts to RQ0, RSS not needed */ 2207 aq_req.mce.op = 0; 2208 aq_req.mce.index = 0; 2209 aq_req.mce.eol = eol; 2210 aq_req.mce.pf_func = pcifunc; 2211 aq_req.mce.next = next; 2212 2213 /* All fields valid */ 2214 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2215 2216 err = 
rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2217 if (err) { 2218 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2219 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2220 return err; 2221 } 2222 return 0; 2223 } 2224 2225 static int nix_update_mce_list(struct nix_mce_list *mce_list, 2226 u16 pcifunc, bool add) 2227 { 2228 struct mce *mce, *tail = NULL; 2229 bool delete = false; 2230 2231 /* Scan through the current list */ 2232 hlist_for_each_entry(mce, &mce_list->head, node) { 2233 /* If already exists, then delete */ 2234 if (mce->pcifunc == pcifunc && !add) { 2235 delete = true; 2236 break; 2237 } 2238 tail = mce; 2239 } 2240 2241 if (delete) { 2242 hlist_del(&mce->node); 2243 kfree(mce); 2244 mce_list->count--; 2245 return 0; 2246 } 2247 2248 if (!add) 2249 return 0; 2250 2251 /* Add a new one to the list, at the tail */ 2252 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2253 if (!mce) 2254 return -ENOMEM; 2255 mce->pcifunc = pcifunc; 2256 if (!tail) 2257 hlist_add_head(&mce->node, &mce_list->head); 2258 else 2259 hlist_add_behind(&mce->node, &tail->node); 2260 mce_list->count++; 2261 return 0; 2262 } 2263 2264 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) 2265 { 2266 int err = 0, idx, next_idx, last_idx; 2267 struct nix_mce_list *mce_list; 2268 struct nix_mcast *mcast; 2269 struct nix_hw *nix_hw; 2270 struct rvu_pfvf *pfvf; 2271 struct mce *mce; 2272 int blkaddr; 2273 2274 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ 2275 if (is_afvf(pcifunc)) 2276 return 0; 2277 2278 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2279 if (blkaddr < 0) 2280 return 0; 2281 2282 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2283 if (!nix_hw) 2284 return 0; 2285 2286 mcast = &nix_hw->mcast; 2287 2288 /* Get this PF/VF func's MCE index */ 2289 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2290 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2291 2292 mce_list = &pfvf->bcast_mce_list; 2293 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { 2294 dev_err(rvu->dev, 2295 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2296 __func__, idx, mce_list->max, 2297 pcifunc >> RVU_PFVF_PF_SHIFT); 2298 return -EINVAL; 2299 } 2300 2301 mutex_lock(&mcast->mce_lock); 2302 2303 err = nix_update_mce_list(mce_list, pcifunc, add); 2304 if (err) 2305 goto end; 2306 2307 /* Disable MCAM entry in NPC */ 2308 if (!mce_list->count) { 2309 rvu_npc_enable_bcast_entry(rvu, pcifunc, false); 2310 goto end; 2311 } 2312 2313 /* Dump the updated list to HW */ 2314 idx = pfvf->bcast_mce_idx; 2315 last_idx = idx + mce_list->count - 1; 2316 hlist_for_each_entry(mce, &mce_list->head, node) { 2317 if (idx > last_idx) 2318 break; 2319 2320 next_idx = idx + 1; 2321 /* EOL should be set in last MCE */ 2322 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2323 mce->pcifunc, next_idx, 2324 (next_idx > last_idx) ? 
true : false); 2325 if (err) 2326 goto end; 2327 idx++; 2328 } 2329 2330 end: 2331 mutex_unlock(&mcast->mce_lock); 2332 return err; 2333 } 2334 2335 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2336 { 2337 struct nix_mcast *mcast = &nix_hw->mcast; 2338 int err, pf, numvfs, idx; 2339 struct rvu_pfvf *pfvf; 2340 u16 pcifunc; 2341 u64 cfg; 2342 2343 /* Skip PF0 (i.e AF) */ 2344 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2345 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2346 /* If PF is not enabled, nothing to do */ 2347 if (!((cfg >> 20) & 0x01)) 2348 continue; 2349 /* Get numVFs attached to this PF */ 2350 numvfs = (cfg >> 12) & 0xFF; 2351 2352 pfvf = &rvu->pf[pf]; 2353 2354 /* This NIX0/1 block mapped to PF ? */ 2355 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2356 continue; 2357 2358 /* Save the start MCE */ 2359 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2360 2361 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2362 2363 for (idx = 0; idx < (numvfs + 1); idx++) { 2364 /* idx-0 is for PF, followed by VFs */ 2365 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2366 pcifunc |= idx; 2367 /* Add dummy entries now, so that we don't have to check 2368 * for whether AQ_OP should be INIT/WRITE later on. 2369 * Will be updated when a NIXLF is attached/detached to 2370 * these PF/VFs. 2371 */ 2372 err = nix_blk_setup_mce(rvu, nix_hw, 2373 pfvf->bcast_mce_idx + idx, 2374 NIX_AQ_INSTOP_INIT, 2375 pcifunc, 0, true); 2376 if (err) 2377 return err; 2378 } 2379 } 2380 return 0; 2381 } 2382 2383 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2384 { 2385 struct nix_mcast *mcast = &nix_hw->mcast; 2386 struct rvu_hwinfo *hw = rvu->hw; 2387 int err, size; 2388 2389 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2390 size = (1ULL << size); 2391 2392 /* Alloc memory for multicast/mirror replication entries */ 2393 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2394 (256UL << MC_TBL_SIZE), size); 2395 if (err) 2396 return -ENOMEM; 2397 2398 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2399 (u64)mcast->mce_ctx->iova); 2400 2401 /* Set max list length equal to max no of VFs per PF + PF itself */ 2402 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2403 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2404 2405 /* Alloc memory for multicast replication buffers */ 2406 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2407 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2408 (8UL << MC_BUF_CNT), size); 2409 if (err) 2410 return -ENOMEM; 2411 2412 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2413 (u64)mcast->mcast_buf->iova); 2414 2415 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2416 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2417 2418 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2419 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2420 BIT_ULL(20) | MC_BUF_CNT); 2421 2422 mutex_init(&mcast->mce_lock); 2423 2424 return nix_setup_bcast_tables(rvu, nix_hw); 2425 } 2426 2427 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 2428 { 2429 struct nix_txvlan *vlan = &nix_hw->txvlan; 2430 int err; 2431 2432 /* Allocate resource bimap for tx vtag def registers*/ 2433 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 2434 err = rvu_alloc_bitmap(&vlan->rsrc); 2435 if (err) 2436 return -ENOMEM; 2437 2438 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 2439 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 2440 sizeof(u16), 
GFP_KERNEL); 2441 if (!vlan->entry2pfvf_map) 2442 goto free_mem; 2443 2444 mutex_init(&vlan->rsrc_lock); 2445 return 0; 2446 2447 free_mem: 2448 kfree(vlan->rsrc.bmap); 2449 return -ENOMEM; 2450 } 2451 2452 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2453 { 2454 struct nix_txsch *txsch; 2455 int err, lvl, schq; 2456 u64 cfg, reg; 2457 2458 /* Get scheduler queue count of each type and alloc 2459 * bitmap for each for alloc/free/attach operations. 2460 */ 2461 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2462 txsch = &nix_hw->txsch[lvl]; 2463 txsch->lvl = lvl; 2464 switch (lvl) { 2465 case NIX_TXSCH_LVL_SMQ: 2466 reg = NIX_AF_MDQ_CONST; 2467 break; 2468 case NIX_TXSCH_LVL_TL4: 2469 reg = NIX_AF_TL4_CONST; 2470 break; 2471 case NIX_TXSCH_LVL_TL3: 2472 reg = NIX_AF_TL3_CONST; 2473 break; 2474 case NIX_TXSCH_LVL_TL2: 2475 reg = NIX_AF_TL2_CONST; 2476 break; 2477 case NIX_TXSCH_LVL_TL1: 2478 reg = NIX_AF_TL1_CONST; 2479 break; 2480 } 2481 cfg = rvu_read64(rvu, blkaddr, reg); 2482 txsch->schq.max = cfg & 0xFFFF; 2483 err = rvu_alloc_bitmap(&txsch->schq); 2484 if (err) 2485 return err; 2486 2487 /* Allocate memory for scheduler queues to 2488 * PF/VF pcifunc mapping info. 2489 */ 2490 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 2491 sizeof(u32), GFP_KERNEL); 2492 if (!txsch->pfvf_map) 2493 return -ENOMEM; 2494 for (schq = 0; schq < txsch->schq.max; schq++) 2495 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2496 } 2497 return 0; 2498 } 2499 2500 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 2501 int blkaddr, u32 cfg) 2502 { 2503 int fmt_idx; 2504 2505 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 2506 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 2507 return fmt_idx; 2508 } 2509 if (fmt_idx >= nix_hw->mark_format.total) 2510 return -ERANGE; 2511 2512 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 2513 nix_hw->mark_format.cfg[fmt_idx] = cfg; 2514 nix_hw->mark_format.in_use++; 2515 return fmt_idx; 2516 } 2517 2518 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 2519 int blkaddr) 2520 { 2521 u64 cfgs[] = { 2522 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 2523 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 2524 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 2525 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 2526 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 2527 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 2528 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 2529 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 2530 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 2531 }; 2532 int i, rc; 2533 u64 total; 2534 2535 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 2536 nix_hw->mark_format.total = (u8)total; 2537 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 2538 GFP_KERNEL); 2539 if (!nix_hw->mark_format.cfg) 2540 return -ENOMEM; 2541 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 2542 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 2543 if (rc < 0) 2544 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 2545 i, rc); 2546 } 2547 2548 return 0; 2549 } 2550 2551 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2552 { 2553 /* CN10K supports LBK FIFO size 72 KB */ 2554 if (rvu->hw->lbk_bufsize == 0x12000) 2555 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 2556 else 2557 *max_mtu = NIC_HW_MAX_FRS; 2558 } 2559 2560 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2561 { 2562 /* RPM supports FIFO len 128 KB */ 2563 if 
(rvu_cgx_get_fifolen(rvu) == 0x20000) 2564 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 2565 else 2566 *max_mtu = NIC_HW_MAX_FRS; 2567 } 2568 2569 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 2570 struct nix_hw_info *rsp) 2571 { 2572 u16 pcifunc = req->hdr.pcifunc; 2573 int blkaddr; 2574 2575 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2576 if (blkaddr < 0) 2577 return NIX_AF_ERR_AF_LF_INVALID; 2578 2579 if (is_afvf(pcifunc)) 2580 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 2581 else 2582 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 2583 2584 rsp->min_mtu = NIC_HW_MIN_FRS; 2585 return 0; 2586 } 2587 2588 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 2589 struct msg_rsp *rsp) 2590 { 2591 u16 pcifunc = req->hdr.pcifunc; 2592 int i, nixlf, blkaddr, err; 2593 u64 stats; 2594 2595 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2596 if (err) 2597 return err; 2598 2599 /* Get stats count supported by HW */ 2600 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 2601 2602 /* Reset tx stats */ 2603 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 2604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 2605 2606 /* Reset rx stats */ 2607 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 2608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 2609 2610 return 0; 2611 } 2612 2613 /* Returns the ALG index to be set into NPC_RX_ACTION */ 2614 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 2615 { 2616 int i; 2617 2618 /* Scan over existing algo entries to find a match */ 2619 for (i = 0; i < nix_hw->flowkey.in_use; i++) 2620 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 2621 return i; 2622 2623 return -ERANGE; 2624 } 2625 2626 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 2627 { 2628 int idx, nr_field, key_off, field_marker, keyoff_marker; 2629 int max_key_off, max_bit_pos, group_member; 2630 struct nix_rx_flowkey_alg *field; 2631 struct nix_rx_flowkey_alg tmp; 2632 u32 key_type, valid_key; 2633 int l4_key_offset = 0; 2634 2635 if (!alg) 2636 return -EINVAL; 2637 2638 #define FIELDS_PER_ALG 5 2639 #define MAX_KEY_OFF 40 2640 /* Clear all fields */ 2641 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 2642 2643 /* Each of the 32 possible flow key algorithm definitions should 2644 * fall into above incremental config (except ALG0). Otherwise a 2645 * single NPC MCAM entry is not sufficient for supporting RSS. 2646 * 2647 * If a different definition or combination is needed then NPC MCAM 2648 * has to be programmed to filter such pkts and its action should 2649 * point to this definition to calculate flowtag or hash. 2650 * 2651 * The `for loop` goes over _all_ protocol fields and the following 2652 * variables depict the state machine forward progress logic. 2653 * 2654 * keyoff_marker - Enabled when hash byte length needs to be accounted 2655 * in field->key_offset update. 2656 * field_marker - Enabled when a new field needs to be selected. 2657 * group_member - Enabled when protocol is part of a group.
2658 */ 2659 2660 keyoff_marker = 0; max_key_off = 0; group_member = 0; 2661 nr_field = 0; key_off = 0; field_marker = 1; 2662 field = &tmp; max_bit_pos = fls(flow_cfg); 2663 for (idx = 0; 2664 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 2665 key_off < MAX_KEY_OFF; idx++) { 2666 key_type = BIT(idx); 2667 valid_key = flow_cfg & key_type; 2668 /* Found a field marker, reset the field values */ 2669 if (field_marker) 2670 memset(&tmp, 0, sizeof(tmp)); 2671 2672 field_marker = true; 2673 keyoff_marker = true; 2674 switch (key_type) { 2675 case NIX_FLOW_KEY_TYPE_PORT: 2676 field->sel_chan = true; 2677 /* This should be set to 1, when SEL_CHAN is set */ 2678 field->bytesm1 = 1; 2679 break; 2680 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 2681 field->lid = NPC_LID_LC; 2682 field->hdr_offset = 9; /* offset */ 2683 field->bytesm1 = 0; /* 1 byte */ 2684 field->ltype_match = NPC_LT_LC_IP; 2685 field->ltype_mask = 0xF; 2686 break; 2687 case NIX_FLOW_KEY_TYPE_IPV4: 2688 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 2689 field->lid = NPC_LID_LC; 2690 field->ltype_match = NPC_LT_LC_IP; 2691 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 2692 field->lid = NPC_LID_LG; 2693 field->ltype_match = NPC_LT_LG_TU_IP; 2694 } 2695 field->hdr_offset = 12; /* SIP offset */ 2696 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 2697 field->ltype_mask = 0xF; /* Match only IPv4 */ 2698 keyoff_marker = false; 2699 break; 2700 case NIX_FLOW_KEY_TYPE_IPV6: 2701 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 2702 field->lid = NPC_LID_LC; 2703 field->ltype_match = NPC_LT_LC_IP6; 2704 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 2705 field->lid = NPC_LID_LG; 2706 field->ltype_match = NPC_LT_LG_TU_IP6; 2707 } 2708 field->hdr_offset = 8; /* SIP offset */ 2709 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 2710 field->ltype_mask = 0xF; /* Match only IPv6 */ 2711 break; 2712 case NIX_FLOW_KEY_TYPE_TCP: 2713 case NIX_FLOW_KEY_TYPE_UDP: 2714 case NIX_FLOW_KEY_TYPE_SCTP: 2715 case NIX_FLOW_KEY_TYPE_INNR_TCP: 2716 case NIX_FLOW_KEY_TYPE_INNR_UDP: 2717 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 2718 field->lid = NPC_LID_LD; 2719 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 2720 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 2721 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 2722 field->lid = NPC_LID_LH; 2723 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 2724 2725 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 2726 * so no need to change the ltype_match, just change 2727 * the lid for inner protocols 2728 */ 2729 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 2730 (int)NPC_LT_LH_TU_TCP); 2731 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 2732 (int)NPC_LT_LH_TU_UDP); 2733 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 2734 (int)NPC_LT_LH_TU_SCTP); 2735 2736 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 2737 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 2738 valid_key) { 2739 field->ltype_match |= NPC_LT_LD_TCP; 2740 group_member = true; 2741 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 2742 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 2743 valid_key) { 2744 field->ltype_match |= NPC_LT_LD_UDP; 2745 group_member = true; 2746 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 2747 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 2748 valid_key) { 2749 field->ltype_match |= NPC_LT_LD_SCTP; 2750 group_member = true; 2751 } 2752 field->ltype_mask = ~field->ltype_match; 2753 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 2754 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 2755 /* Handle the case where any of the group item 2756 * is enabled in the group but not the final one 2757 */ 2758 if (group_member) { 2759 valid_key = true; 
2760 group_member = false; 2761 } 2762 } else { 2763 field_marker = false; 2764 keyoff_marker = false; 2765 } 2766 2767 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 2768 * remember the TCP key offset of 40 byte hash key. 2769 */ 2770 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 2771 l4_key_offset = key_off; 2772 break; 2773 case NIX_FLOW_KEY_TYPE_NVGRE: 2774 field->lid = NPC_LID_LD; 2775 field->hdr_offset = 4; /* VSID offset */ 2776 field->bytesm1 = 2; 2777 field->ltype_match = NPC_LT_LD_NVGRE; 2778 field->ltype_mask = 0xF; 2779 break; 2780 case NIX_FLOW_KEY_TYPE_VXLAN: 2781 case NIX_FLOW_KEY_TYPE_GENEVE: 2782 field->lid = NPC_LID_LE; 2783 field->bytesm1 = 2; 2784 field->hdr_offset = 4; 2785 field->ltype_mask = 0xF; 2786 field_marker = false; 2787 keyoff_marker = false; 2788 2789 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 2790 field->ltype_match |= NPC_LT_LE_VXLAN; 2791 group_member = true; 2792 } 2793 2794 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 2795 field->ltype_match |= NPC_LT_LE_GENEVE; 2796 group_member = true; 2797 } 2798 2799 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 2800 if (group_member) { 2801 field->ltype_mask = ~field->ltype_match; 2802 field_marker = true; 2803 keyoff_marker = true; 2804 valid_key = true; 2805 group_member = false; 2806 } 2807 } 2808 break; 2809 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 2810 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 2811 field->lid = NPC_LID_LA; 2812 field->ltype_match = NPC_LT_LA_ETHER; 2813 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 2814 field->lid = NPC_LID_LF; 2815 field->ltype_match = NPC_LT_LF_TU_ETHER; 2816 } 2817 field->hdr_offset = 0; 2818 field->bytesm1 = 5; /* DMAC 6 Byte */ 2819 field->ltype_mask = 0xF; 2820 break; 2821 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 2822 field->lid = NPC_LID_LC; 2823 field->hdr_offset = 40; /* IPV6 hdr */ 2824 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 2825 field->ltype_match = NPC_LT_LC_IP6_EXT; 2826 field->ltype_mask = 0xF; 2827 break; 2828 case NIX_FLOW_KEY_TYPE_GTPU: 2829 field->lid = NPC_LID_LE; 2830 field->hdr_offset = 4; 2831 field->bytesm1 = 3; /* 4 bytes TID*/ 2832 field->ltype_match = NPC_LT_LE_GTPU; 2833 field->ltype_mask = 0xF; 2834 break; 2835 case NIX_FLOW_KEY_TYPE_VLAN: 2836 field->lid = NPC_LID_LB; 2837 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 2838 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 2839 field->ltype_match = NPC_LT_LB_CTAG; 2840 field->ltype_mask = 0xF; 2841 field->fn_mask = 1; /* Mask out the first nibble */ 2842 break; 2843 case NIX_FLOW_KEY_TYPE_AH: 2844 case NIX_FLOW_KEY_TYPE_ESP: 2845 field->hdr_offset = 0; 2846 field->bytesm1 = 7; /* SPI + sequence number */ 2847 field->ltype_mask = 0xF; 2848 field->lid = NPC_LID_LE; 2849 field->ltype_match = NPC_LT_LE_ESP; 2850 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 2851 field->lid = NPC_LID_LD; 2852 field->ltype_match = NPC_LT_LD_AH; 2853 field->hdr_offset = 4; 2854 keyoff_marker = false; 2855 } 2856 break; 2857 } 2858 field->ena = 1; 2859 2860 /* Found a valid flow key type */ 2861 if (valid_key) { 2862 /* Use the key offset of TCP/UDP/SCTP fields 2863 * for ESP/AH fields. 
2864 */ 2865 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 2866 key_type == NIX_FLOW_KEY_TYPE_AH) 2867 key_off = l4_key_offset; 2868 field->key_offset = key_off; 2869 memcpy(&alg[nr_field], field, sizeof(*field)); 2870 max_key_off = max(max_key_off, field->bytesm1 + 1); 2871 2872 /* Found a field marker, get the next field */ 2873 if (field_marker) 2874 nr_field++; 2875 } 2876 2877 /* Found a keyoff marker, update the new key_off */ 2878 if (keyoff_marker) { 2879 key_off += max_key_off; 2880 max_key_off = 0; 2881 } 2882 } 2883 /* Processed all the flow key types */ 2884 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 2885 return 0; 2886 else 2887 return NIX_AF_ERR_RSS_NOSPC_FIELD; 2888 } 2889 2890 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 2891 { 2892 u64 field[FIELDS_PER_ALG]; 2893 struct nix_hw *hw; 2894 int fid, rc; 2895 2896 hw = get_nix_hw(rvu->hw, blkaddr); 2897 if (!hw) 2898 return -EINVAL; 2899 2900 /* No room to add a new flow hash algorithm */ 2901 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 2902 return NIX_AF_ERR_RSS_NOSPC_ALGO; 2903 2904 /* Generate algo fields for the given flow_cfg */ 2905 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 2906 if (rc) 2907 return rc; 2908 2909 /* Update ALGX_FIELDX register with generated fields */ 2910 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2911 rvu_write64(rvu, blkaddr, 2912 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 2913 fid), field[fid]); 2914 2915 /* Store the flow_cfg for further lookup */ 2916 rc = hw->flowkey.in_use; 2917 hw->flowkey.flowkey[rc] = flow_cfg; 2918 hw->flowkey.in_use++; 2919 2920 return rc; 2921 } 2922 2923 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 2924 struct nix_rss_flowkey_cfg *req, 2925 struct nix_rss_flowkey_cfg_rsp *rsp) 2926 { 2927 u16 pcifunc = req->hdr.pcifunc; 2928 int alg_idx, nixlf, blkaddr; 2929 struct nix_hw *nix_hw; 2930 int err; 2931 2932 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2933 if (err) 2934 return err; 2935 2936 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2937 if (!nix_hw) 2938 return -EINVAL; 2939 2940 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 2941 /* Failed to get algo index from the existing list, reserve a new one */ 2942 if (alg_idx < 0) { 2943 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 2944 req->flowkey_cfg); 2945 if (alg_idx < 0) 2946 return alg_idx; 2947 } 2948 rsp->alg_idx = alg_idx; 2949 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 2950 alg_idx, req->mcam_index); 2951 return 0; 2952 } 2953 2954 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 2955 { 2956 u32 flowkey_cfg, minkey_cfg; 2957 int alg, fid, rc; 2958 2959 /* Disable all flow key algx fieldx */ 2960 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 2961 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2962 rvu_write64(rvu, blkaddr, 2963 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 2964 0); 2965 } 2966 2967 /* IPv4/IPv6 SIP/DIPs */ 2968 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 2969 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2970 if (rc < 0) 2971 return rc; 2972 2973 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2974 minkey_cfg = flowkey_cfg; 2975 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 2976 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2977 if (rc < 0) 2978 return rc; 2979 2980 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2981 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 2982 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2983 if
(rc < 0) 2984 return rc; 2985 2986 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2987 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 2988 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2989 if (rc < 0) 2990 return rc; 2991 2992 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2993 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2994 NIX_FLOW_KEY_TYPE_UDP; 2995 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2996 if (rc < 0) 2997 return rc; 2998 2999 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3000 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3001 NIX_FLOW_KEY_TYPE_SCTP; 3002 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3003 if (rc < 0) 3004 return rc; 3005 3006 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3007 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3008 NIX_FLOW_KEY_TYPE_SCTP; 3009 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3010 if (rc < 0) 3011 return rc; 3012 3013 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3014 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3015 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3016 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3017 if (rc < 0) 3018 return rc; 3019 3020 return 0; 3021 } 3022 3023 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3024 struct nix_set_mac_addr *req, 3025 struct msg_rsp *rsp) 3026 { 3027 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3028 u16 pcifunc = req->hdr.pcifunc; 3029 int blkaddr, nixlf, err; 3030 struct rvu_pfvf *pfvf; 3031 3032 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3033 if (err) 3034 return err; 3035 3036 pfvf = rvu_get_pfvf(rvu, pcifunc); 3037 3038 /* VF can't overwrite admin(PF) changes */ 3039 if (from_vf && pfvf->pf_set_vf_cfg) 3040 return -EPERM; 3041 3042 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3043 3044 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3045 pfvf->rx_chan_base, req->mac_addr); 3046 3047 return 0; 3048 } 3049 3050 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 3051 struct msg_req *req, 3052 struct nix_get_mac_addr_rsp *rsp) 3053 { 3054 u16 pcifunc = req->hdr.pcifunc; 3055 struct rvu_pfvf *pfvf; 3056 3057 if (!is_nixlf_attached(rvu, pcifunc)) 3058 return NIX_AF_ERR_AF_LF_INVALID; 3059 3060 pfvf = rvu_get_pfvf(rvu, pcifunc); 3061 3062 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 3063 3064 return 0; 3065 } 3066 3067 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 3068 struct msg_rsp *rsp) 3069 { 3070 bool allmulti = false, disable_promisc = false; 3071 u16 pcifunc = req->hdr.pcifunc; 3072 int blkaddr, nixlf, err; 3073 struct rvu_pfvf *pfvf; 3074 3075 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3076 if (err) 3077 return err; 3078 3079 pfvf = rvu_get_pfvf(rvu, pcifunc); 3080 3081 if (req->mode & NIX_RX_MODE_PROMISC) 3082 allmulti = false; 3083 else if (req->mode & NIX_RX_MODE_ALLMULTI) 3084 allmulti = true; 3085 else 3086 disable_promisc = true; 3087 3088 if (disable_promisc) 3089 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); 3090 else 3091 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3092 pfvf->rx_chan_base, 3093 pfvf->rx_chan_cnt, allmulti); 3094 return 0; 3095 } 3096 3097 static void nix_find_link_frs(struct rvu *rvu, 3098 struct nix_frs_cfg *req, u16 pcifunc) 3099 { 3100 int pf = rvu_get_pf(pcifunc); 3101 struct rvu_pfvf *pfvf; 3102 int maxlen, minlen; 3103 int numvfs, hwvf; 3104 int vf; 3105 3106 /* Update with requester's min/max lengths */ 3107 pfvf = rvu_get_pfvf(rvu, pcifunc); 3108 
pfvf->maxlen = req->maxlen; 3109 if (req->update_minlen) 3110 pfvf->minlen = req->minlen; 3111 3112 maxlen = req->maxlen; 3113 minlen = req->update_minlen ? req->minlen : 0; 3114 3115 /* Get this PF's numVFs and starting hwvf */ 3116 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 3117 3118 /* For each VF, compare requested max/minlen */ 3119 for (vf = 0; vf < numvfs; vf++) { 3120 pfvf = &rvu->hwvf[hwvf + vf]; 3121 if (pfvf->maxlen > maxlen) 3122 maxlen = pfvf->maxlen; 3123 if (req->update_minlen && 3124 pfvf->minlen && pfvf->minlen < minlen) 3125 minlen = pfvf->minlen; 3126 } 3127 3128 /* Compare requested max/minlen with PF's max/minlen */ 3129 pfvf = &rvu->pf[pf]; 3130 if (pfvf->maxlen > maxlen) 3131 maxlen = pfvf->maxlen; 3132 if (req->update_minlen && 3133 pfvf->minlen && pfvf->minlen < minlen) 3134 minlen = pfvf->minlen; 3135 3136 /* Update the request with max/min PF's and it's VF's max/min */ 3137 req->maxlen = maxlen; 3138 if (req->update_minlen) 3139 req->minlen = minlen; 3140 } 3141 3142 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 3143 struct msg_rsp *rsp) 3144 { 3145 struct rvu_hwinfo *hw = rvu->hw; 3146 u16 pcifunc = req->hdr.pcifunc; 3147 int pf = rvu_get_pf(pcifunc); 3148 int blkaddr, schq, link = -1; 3149 struct nix_txsch *txsch; 3150 u64 cfg, lmac_fifo_len; 3151 struct nix_hw *nix_hw; 3152 u8 cgx = 0, lmac = 0; 3153 u16 max_mtu; 3154 3155 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3156 if (blkaddr < 0) 3157 return NIX_AF_ERR_AF_LF_INVALID; 3158 3159 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3160 if (!nix_hw) 3161 return -EINVAL; 3162 3163 if (is_afvf(pcifunc)) 3164 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 3165 else 3166 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 3167 3168 if (!req->sdp_link && req->maxlen > max_mtu) 3169 return NIX_AF_ERR_FRS_INVALID; 3170 3171 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 3172 return NIX_AF_ERR_FRS_INVALID; 3173 3174 /* Check if requester wants to update SMQ's */ 3175 if (!req->update_smq) 3176 goto rx_frscfg; 3177 3178 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 3179 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 3180 mutex_lock(&rvu->rsrc_lock); 3181 for (schq = 0; schq < txsch->schq.max; schq++) { 3182 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 3183 continue; 3184 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 3185 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 3186 if (req->update_minlen) 3187 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 3188 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 3189 } 3190 mutex_unlock(&rvu->rsrc_lock); 3191 3192 rx_frscfg: 3193 /* Check if config is for SDP link */ 3194 if (req->sdp_link) { 3195 if (!hw->sdp_links) 3196 return NIX_AF_ERR_RX_LINK_INVALID; 3197 link = hw->cgx_links + hw->lbk_links; 3198 goto linkcfg; 3199 } 3200 3201 /* Check if the request is from CGX mapped RVU PF */ 3202 if (is_pf_cgxmapped(rvu, pf)) { 3203 /* Get CGX and LMAC to which this PF is mapped and find link */ 3204 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 3205 link = (cgx * hw->lmac_per_cgx) + lmac; 3206 } else if (pf == 0) { 3207 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 3208 link = hw->cgx_links; 3209 } 3210 3211 if (link < 0) 3212 return NIX_AF_ERR_RX_LINK_INVALID; 3213 3214 nix_find_link_frs(rvu, req, pcifunc); 3215 3216 linkcfg: 3217 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 3218 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 3219 if 
(req->update_minlen) 3220 cfg = (cfg & ~0xFFFFULL) | req->minlen; 3221 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 3222 3223 if (req->sdp_link || pf == 0) 3224 return 0; 3225 3226 /* Update transmit credits for CGX links */ 3227 lmac_fifo_len = 3228 rvu_cgx_get_fifolen(rvu) / 3229 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3230 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3231 cfg &= ~(0xFFFFFULL << 12); 3232 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; 3233 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 3234 return 0; 3235 } 3236 3237 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3238 struct msg_rsp *rsp) 3239 { 3240 int nixlf, blkaddr, err; 3241 u64 cfg; 3242 3243 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3244 if (err) 3245 return err; 3246 3247 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3248 /* Set the interface configuration */ 3249 if (req->len_verify & BIT(0)) 3250 cfg |= BIT_ULL(41); 3251 else 3252 cfg &= ~BIT_ULL(41); 3253 3254 if (req->len_verify & BIT(1)) 3255 cfg |= BIT_ULL(40); 3256 else 3257 cfg &= ~BIT_ULL(40); 3258 3259 if (req->csum_verify & BIT(0)) 3260 cfg |= BIT_ULL(37); 3261 else 3262 cfg &= ~BIT_ULL(37); 3263 3264 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3265 3266 return 0; 3267 } 3268 3269 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 3270 { 3271 /* CN10k supports 72KB FIFO size and max packet size of 64k */ 3272 if (rvu->hw->lbk_bufsize == 0x12000) 3273 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; 3274 3275 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 3276 } 3277 3278 static void nix_link_config(struct rvu *rvu, int blkaddr) 3279 { 3280 struct rvu_hwinfo *hw = rvu->hw; 3281 int cgx, lmac_cnt, slink, link; 3282 u16 lbk_max_frs, lmac_max_frs; 3283 u64 tx_credits; 3284 3285 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 3286 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 3287 3288 /* Set default min/max packet lengths allowed on NIX Rx links. 3289 * 3290 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3291 * as undersize and report them to SW as error pkts, hence 3292 * setting it to 40 bytes. 3293 */ 3294 for (link = 0; link < hw->cgx_links; link++) { 3295 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3296 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 3297 } 3298 3299 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 3300 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3301 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 3302 } 3303 if (hw->sdp_links) { 3304 link = hw->cgx_links + hw->lbk_links; 3305 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3306 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3307 } 3308 3309 /* Set credits for Tx links assuming max packet length allowed. 3310 * This will be reconfigured based on MTU set for PF/VF. 
3311 */ 3312 for (cgx = 0; cgx < hw->cgx; cgx++) { 3313 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3314 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - 3315 lmac_max_frs) / 16; 3316 /* Enable credits and set credit pkt count to max allowed */ 3317 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3318 slink = cgx * hw->lmac_per_cgx; 3319 for (link = slink; link < (slink + lmac_cnt); link++) { 3320 rvu_write64(rvu, blkaddr, 3321 NIX_AF_TX_LINKX_NORM_CREDIT(link), 3322 tx_credits); 3323 } 3324 } 3325 3326 /* Set Tx credits for LBK link */ 3327 slink = hw->cgx_links; 3328 for (link = slink; link < (slink + hw->lbk_links); link++) { 3329 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 3330 /* Enable credits and set credit pkt count to max allowed */ 3331 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3332 rvu_write64(rvu, blkaddr, 3333 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 3334 } 3335 } 3336 3337 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 3338 { 3339 int idx, err; 3340 u64 status; 3341 3342 /* Start X2P bus calibration */ 3343 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3344 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 3345 /* Wait for calibration to complete */ 3346 err = rvu_poll_reg(rvu, blkaddr, 3347 NIX_AF_STATUS, BIT_ULL(10), false); 3348 if (err) { 3349 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 3350 return err; 3351 } 3352 3353 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 3354 /* Check if CGX devices are ready */ 3355 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 3356 /* Skip when cgx port is not available */ 3357 if (!rvu_cgx_pdata(idx, rvu) || 3358 (status & (BIT_ULL(16 + idx)))) 3359 continue; 3360 dev_err(rvu->dev, 3361 "CGX%d didn't respond to NIX X2P calibration\n", idx); 3362 err = -EBUSY; 3363 } 3364 3365 /* Check if LBK is ready */ 3366 if (!(status & BIT_ULL(19))) { 3367 dev_err(rvu->dev, 3368 "LBK didn't respond to NIX X2P calibration\n"); 3369 err = -EBUSY; 3370 } 3371 3372 /* Clear 'calibrate_x2p' bit */ 3373 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3374 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 3375 if (err || (status & 0x3FFULL)) 3376 dev_err(rvu->dev, 3377 "NIX X2P calibration failed, status 0x%llx\n", status); 3378 if (err) 3379 return err; 3380 return 0; 3381 } 3382 3383 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 3384 { 3385 u64 cfg; 3386 int err; 3387 3388 /* Set admin queue endianness */ 3389 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 3390 #ifdef __BIG_ENDIAN 3391 cfg |= BIT_ULL(8); 3392 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3393 #else 3394 cfg &= ~BIT_ULL(8); 3395 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3396 #endif 3397 3398 /* Do not bypass NDC cache */ 3399 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 3400 cfg &= ~0x3FFEULL; 3401 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 3402 /* Disable caching of SQB aka SQEs */ 3403 cfg |= 0x04ULL; 3404 #endif 3405 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 3406 3407 /* Result structure can be followed by RQ/SQ/CQ context at 3408 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 3409 * operation type. Alloc sufficient result memory for all operations. 
3410 */ 3411 err = rvu_aq_alloc(rvu, &block->aq, 3412 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 3413 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 3414 if (err) 3415 return err; 3416 3417 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 3418 rvu_write64(rvu, block->addr, 3419 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 3420 return 0; 3421 } 3422 3423 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 3424 { 3425 const struct npc_lt_def_cfg *ltdefs; 3426 struct rvu_hwinfo *hw = rvu->hw; 3427 int blkaddr = nix_hw->blkaddr; 3428 struct rvu_block *block; 3429 int err; 3430 u64 cfg; 3431 3432 block = &hw->block[blkaddr]; 3433 3434 if (is_rvu_96xx_B0(rvu)) { 3435 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 3436 * internal state when conditional clocks are turned off. 3437 * Hence enable them. 3438 */ 3439 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3440 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 3441 3442 /* Set chan/link to backpressure TL3 instead of TL2 */ 3443 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 3444 3445 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 3446 * This sticky mode is known to cause SQ stalls when multiple 3447 * SQs are mapped to same SMQ and transmitting pkts at a time. 3448 */ 3449 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 3450 cfg &= ~BIT_ULL(15); 3451 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 3452 } 3453 3454 ltdefs = rvu->kpu.lt_def; 3455 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 3456 err = nix_calibrate_x2p(rvu, blkaddr); 3457 if (err) 3458 return err; 3459 3460 /* Initialize admin queue */ 3461 err = nix_aq_init(rvu, block); 3462 if (err) 3463 return err; 3464 3465 /* Restore CINT timer delay to HW reset values */ 3466 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 3467 3468 if (is_block_implemented(hw, blkaddr)) { 3469 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 3470 if (err) 3471 return err; 3472 3473 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 3474 if (err) 3475 return err; 3476 3477 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 3478 if (err) 3479 return err; 3480 3481 err = nix_setup_txvlan(rvu, nix_hw); 3482 if (err) 3483 return err; 3484 3485 /* Configure segmentation offload formats */ 3486 nix_setup_lso(rvu, nix_hw, blkaddr); 3487 3488 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 3489 * This helps HW protocol checker to identify headers 3490 * and validate length and checksums. 
3491 */ 3492 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 3493 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 3494 ltdefs->rx_ol2.ltype_mask); 3495 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 3496 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 3497 ltdefs->rx_oip4.ltype_mask); 3498 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 3499 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 3500 ltdefs->rx_iip4.ltype_mask); 3501 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 3502 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 3503 ltdefs->rx_oip6.ltype_mask); 3504 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 3505 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 3506 ltdefs->rx_iip6.ltype_mask); 3507 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 3508 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 3509 ltdefs->rx_otcp.ltype_mask); 3510 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 3511 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 3512 ltdefs->rx_itcp.ltype_mask); 3513 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 3514 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 3515 ltdefs->rx_oudp.ltype_mask); 3516 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 3517 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 3518 ltdefs->rx_iudp.ltype_mask); 3519 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 3520 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 3521 ltdefs->rx_osctp.ltype_mask); 3522 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 3523 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 3524 ltdefs->rx_isctp.ltype_mask); 3525 3526 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 3527 if (err) 3528 return err; 3529 3530 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 3531 nix_link_config(rvu, blkaddr); 3532 3533 /* Enable Channel backpressure */ 3534 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 3535 } 3536 return 0; 3537 } 3538 3539 int rvu_nix_init(struct rvu *rvu) 3540 { 3541 struct rvu_hwinfo *hw = rvu->hw; 3542 struct nix_hw *nix_hw; 3543 int blkaddr = 0, err; 3544 int i = 0; 3545 3546 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 3547 GFP_KERNEL); 3548 if (!hw->nix) 3549 return -ENOMEM; 3550 3551 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3552 while (blkaddr) { 3553 nix_hw = &hw->nix[i]; 3554 nix_hw->rvu = rvu; 3555 nix_hw->blkaddr = blkaddr; 3556 err = rvu_nix_block_init(rvu, nix_hw); 3557 if (err) 3558 return err; 3559 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3560 i++; 3561 } 3562 3563 return 0; 3564 } 3565 3566 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 3567 struct rvu_block *block) 3568 { 3569 struct nix_txsch *txsch; 3570 struct nix_mcast *mcast; 3571 struct nix_txvlan *vlan; 3572 struct nix_hw *nix_hw; 3573 int lvl; 3574 3575 rvu_aq_free(rvu, block->aq); 3576 3577 if (is_block_implemented(rvu->hw, blkaddr)) { 3578 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3579 if (!nix_hw) 3580 return; 3581 3582 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3583 txsch = &nix_hw->txsch[lvl]; 3584 kfree(txsch->schq.bmap); 3585 } 3586 3587 vlan = &nix_hw->txvlan; 3588 kfree(vlan->rsrc.bmap); 3589 mutex_destroy(&vlan->rsrc_lock); 3590 devm_kfree(rvu->dev, vlan->entry2pfvf_map); 3591 3592 mcast = &nix_hw->mcast; 3593 qmem_free(rvu->dev, mcast->mce_ctx); 3594 qmem_free(rvu->dev, mcast->mcast_buf); 3595 mutex_destroy(&mcast->mce_lock); 3596 } 3597 
} 3598 3599 void rvu_nix_freemem(struct rvu *rvu) 3600 { 3601 struct rvu_hwinfo *hw = rvu->hw; 3602 struct rvu_block *block; 3603 int blkaddr = 0; 3604 3605 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3606 while (blkaddr) { 3607 block = &hw->block[blkaddr]; 3608 rvu_nix_block_freemem(rvu, blkaddr, block); 3609 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3610 } 3611 } 3612 3613 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 3614 struct msg_rsp *rsp) 3615 { 3616 u16 pcifunc = req->hdr.pcifunc; 3617 int nixlf, err; 3618 3619 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3620 if (err) 3621 return err; 3622 3623 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 3624 3625 npc_mcam_enable_flows(rvu, pcifunc); 3626 3627 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 3628 } 3629 3630 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 3631 struct msg_rsp *rsp) 3632 { 3633 u16 pcifunc = req->hdr.pcifunc; 3634 int nixlf, err; 3635 3636 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3637 if (err) 3638 return err; 3639 3640 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 3641 3642 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 3643 } 3644 3645 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 3646 { 3647 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3648 struct hwctx_disable_req ctx_req; 3649 int err; 3650 3651 ctx_req.hdr.pcifunc = pcifunc; 3652 3653 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 3654 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 3655 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 3656 nix_interface_deinit(rvu, pcifunc, nixlf); 3657 nix_rx_sync(rvu, blkaddr); 3658 nix_txschq_free(rvu, pcifunc); 3659 3660 rvu_cgx_start_stop_io(rvu, pcifunc, false); 3661 3662 if (pfvf->sq_ctx) { 3663 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 3664 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3665 if (err) 3666 dev_err(rvu->dev, "SQ ctx disable failed\n"); 3667 } 3668 3669 if (pfvf->rq_ctx) { 3670 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 3671 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3672 if (err) 3673 dev_err(rvu->dev, "RQ ctx disable failed\n"); 3674 } 3675 3676 if (pfvf->cq_ctx) { 3677 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 3678 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3679 if (err) 3680 dev_err(rvu->dev, "CQ ctx disable failed\n"); 3681 } 3682 3683 nix_ctx_free(rvu, pfvf); 3684 } 3685 3686 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 3687 3688 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 3689 { 3690 struct rvu_hwinfo *hw = rvu->hw; 3691 struct rvu_block *block; 3692 int blkaddr, pf; 3693 int nixlf; 3694 u64 cfg; 3695 3696 pf = rvu_get_pf(pcifunc); 3697 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 3698 return 0; 3699 3700 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3701 if (blkaddr < 0) 3702 return NIX_AF_ERR_AF_LF_INVALID; 3703 3704 block = &hw->block[blkaddr]; 3705 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 3706 if (nixlf < 0) 3707 return NIX_AF_ERR_AF_LF_INVALID; 3708 3709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 3710 3711 if (enable) 3712 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 3713 else 3714 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 3715 3716 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 3717 3718 return 0; 3719 } 3720 3721 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 3722 struct msg_rsp *rsp) 3723 { 3724 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 3725 } 3726 
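/* Counterpart of the enable handler above: both mailbox handlers funnel
 * into rvu_nix_lf_ptp_tx_cfg(), which only sets or clears the
 * NIX_AF_LFX_TX_CFG_PTP_EN bit for the requester's NIXLF.
 */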
3727 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 3728 struct msg_rsp *rsp) 3729 { 3730 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 3731 } 3732 3733 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 3734 struct nix_lso_format_cfg *req, 3735 struct nix_lso_format_cfg_rsp *rsp) 3736 { 3737 u16 pcifunc = req->hdr.pcifunc; 3738 struct nix_hw *nix_hw; 3739 struct rvu_pfvf *pfvf; 3740 int blkaddr, idx, f; 3741 u64 reg; 3742 3743 pfvf = rvu_get_pfvf(rvu, pcifunc); 3744 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3745 if (!pfvf->nixlf || blkaddr < 0) 3746 return NIX_AF_ERR_AF_LF_INVALID; 3747 3748 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3749 if (!nix_hw) 3750 return -EINVAL; 3751 3752 /* Find existing matching LSO format, if any */ 3753 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 3754 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 3755 reg = rvu_read64(rvu, blkaddr, 3756 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 3757 if (req->fields[f] != (reg & req->field_mask)) 3758 break; 3759 } 3760 3761 if (f == NIX_LSO_FIELD_MAX) 3762 break; 3763 } 3764 3765 if (idx < nix_hw->lso.in_use) { 3766 /* Match found */ 3767 rsp->lso_format_idx = idx; 3768 return 0; 3769 } 3770 3771 if (nix_hw->lso.in_use == nix_hw->lso.total) 3772 return NIX_AF_ERR_LSO_CFG_FAIL; 3773 3774 rsp->lso_format_idx = nix_hw->lso.in_use++; 3775 3776 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 3777 rvu_write64(rvu, blkaddr, 3778 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 3779 req->fields[f]); 3780 3781 return 0; 3782 } 3783 3784 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 3785 { 3786 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 3787 3788 /* overwrite vf mac address with default_mac */ 3789 if (from_vf) 3790 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 3791 } 3792
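/* Illustrative sketch, not part of the driver: throughout this file the
 * txsch->pfvf_map[] entries pair the owning PF_FUNC with per-queue flags
 * via the TXSCH_MAP()/TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() helpers. The
 * hypothetical helper below (name made up for illustration, never called)
 * only restates how this file queries that mapping: the ownership check
 * mirrors nix_txschq_free_one() and the flag check mirrors
 * nix_tl1_default_cfg(). Callers in the driver hold rvu->rsrc_lock while
 * touching pfvf_map[].
 */
static inline bool nix_txschq_owned_and_cfgd(struct nix_txsch *txsch,
					     u16 pcifunc, u16 schq)
{
	u32 map = txsch->pfvf_map[schq];

	/* Scheduler queue must be owned by this PF/VF */
	if (TXSCH_MAP_FUNC(map) != pcifunc)
		return false;

	/* And its default/requested config must have been applied */
	return !!(TXSCH_MAP_FLAGS(map) & NIX_TXSCHQ_CFG_DONE);
}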