1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 14 #include "rvu_struct.h" 15 #include "rvu_reg.h" 16 #include "rvu.h" 17 #include "npc.h" 18 #include "cgx.h" 19 20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 21 int type, int chan_id); 22 23 enum mc_tbl_sz { 24 MC_TBL_SZ_256, 25 MC_TBL_SZ_512, 26 MC_TBL_SZ_1K, 27 MC_TBL_SZ_2K, 28 MC_TBL_SZ_4K, 29 MC_TBL_SZ_8K, 30 MC_TBL_SZ_16K, 31 MC_TBL_SZ_32K, 32 MC_TBL_SZ_64K, 33 }; 34 35 enum mc_buf_cnt { 36 MC_BUF_CNT_8, 37 MC_BUF_CNT_16, 38 MC_BUF_CNT_32, 39 MC_BUF_CNT_64, 40 MC_BUF_CNT_128, 41 MC_BUF_CNT_256, 42 MC_BUF_CNT_512, 43 MC_BUF_CNT_1024, 44 MC_BUF_CNT_2048, 45 }; 46 47 enum nix_makr_fmt_indexes { 48 NIX_MARK_CFG_IP_DSCP_RED, 49 NIX_MARK_CFG_IP_DSCP_YELLOW, 50 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 51 NIX_MARK_CFG_IP_ECN_RED, 52 NIX_MARK_CFG_IP_ECN_YELLOW, 53 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 54 NIX_MARK_CFG_VLAN_DEI_RED, 55 NIX_MARK_CFG_VLAN_DEI_YELLOW, 56 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 57 NIX_MARK_CFG_MAX, 58 }; 59 60 /* For now considering MC resources needed for broadcast 61 * pkt replication only. i.e 256 HWVFs + 12 PFs. 62 */ 63 #define MC_TBL_SIZE MC_TBL_SZ_512 64 #define MC_BUF_CNT MC_BUF_CNT_128 65 66 struct mce { 67 struct hlist_node node; 68 u16 pcifunc; 69 }; 70 71 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 72 { 73 int i = 0; 74 75 /*If blkaddr is 0, return the first nix block address*/ 76 if (blkaddr == 0) 77 return rvu->nix_blkaddr[blkaddr]; 78 79 while (i + 1 < MAX_NIX_BLKS) { 80 if (rvu->nix_blkaddr[i] == blkaddr) 81 return rvu->nix_blkaddr[i + 1]; 82 i++; 83 } 84 85 return 0; 86 } 87 88 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 89 { 90 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 91 int blkaddr; 92 93 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 94 if (!pfvf->nixlf || blkaddr < 0) 95 return false; 96 return true; 97 } 98 99 int rvu_get_nixlf_count(struct rvu *rvu) 100 { 101 int blkaddr = 0, max = 0; 102 struct rvu_block *block; 103 104 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 105 while (blkaddr) { 106 block = &rvu->hw->block[blkaddr]; 107 max += block->lf.max; 108 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 109 } 110 return max; 111 } 112 113 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 114 { 115 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 116 struct rvu_hwinfo *hw = rvu->hw; 117 int blkaddr; 118 119 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 120 if (!pfvf->nixlf || blkaddr < 0) 121 return NIX_AF_ERR_AF_LF_INVALID; 122 123 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 124 if (*nixlf < 0) 125 return NIX_AF_ERR_AF_LF_INVALID; 126 127 if (nix_blkaddr) 128 *nix_blkaddr = blkaddr; 129 130 return 0; 131 } 132 133 static void nix_mce_list_init(struct nix_mce_list *list, int max) 134 { 135 INIT_HLIST_HEAD(&list->head); 136 list->count = 0; 137 list->max = max; 138 } 139 140 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 141 { 142 int idx; 143 144 if (!mcast) 145 return 0; 146 147 idx = mcast->next_free_mce; 148 mcast->next_free_mce += count; 149 return idx; 150 } 151 152 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 153 { 154 int nix_blkaddr = 0, i = 0; 155 struct rvu *rvu = hw->rvu; 156 157 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 158 while (nix_blkaddr) { 159 if (blkaddr == nix_blkaddr && hw->nix) 160 return &hw->nix[i]; 161 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 162 i++; 163 } 164 return NULL; 165 } 166 167 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 168 { 169 int err; 170 171 /*Sync all in flight RX packets to LLC/DRAM */ 172 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 173 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 174 if (err) 175 dev_err(rvu->dev, "NIX RX software sync failed\n"); 176 } 177 178 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 179 int lvl, u16 pcifunc, u16 schq) 180 { 181 struct rvu_hwinfo *hw = rvu->hw; 182 struct nix_txsch *txsch; 183 struct nix_hw *nix_hw; 184 u16 map_func; 185 186 nix_hw = get_nix_hw(rvu->hw, blkaddr); 187 if (!nix_hw) 188 return false; 189 190 txsch = &nix_hw->txsch[lvl]; 191 /* Check out of bounds */ 192 if (schq >= txsch->schq.max) 193 return false; 194 195 mutex_lock(&rvu->rsrc_lock); 196 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 197 mutex_unlock(&rvu->rsrc_lock); 198 199 /* TLs aggegating traffic are shared across PF and VFs */ 200 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 201 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 202 return false; 203 else 204 return true; 205 } 206 207 if (map_func != pcifunc) 208 return false; 209 210 return true; 211 } 212 213 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) 214 { 215 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 216 int pkind, pf, vf, lbkid; 217 u8 cgx_id, lmac_id; 218 int err; 219 220 pf = rvu_get_pf(pcifunc); 221 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 222 return 0; 223 224 switch (type) { 225 case NIX_INTF_TYPE_CGX: 226 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 227 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 228 229 pkind = rvu_npc_get_pkind(rvu, pf); 230 if (pkind < 0) { 231 dev_err(rvu->dev, 232 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 233 return -EINVAL; 234 } 235 pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); 236 pfvf->tx_chan_base = pfvf->rx_chan_base; 237 pfvf->rx_chan_cnt = 1; 238 pfvf->tx_chan_cnt = 1; 239 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 240 rvu_npc_set_pkind(rvu, pkind, pfvf); 241 242 /* By default we enable pause frames */ 243 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) 244 cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), 245 lmac_id, true, true); 246 break; 247 case NIX_INTF_TYPE_LBK: 248 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 249 250 /* If NIX1 block is present on the silicon then NIXes are 251 * assigned alternatively for lbk interfaces. NIX0 should 252 * send packets on lbk link 1 channels and NIX1 should send 253 * on lbk link 0 channels for the communication between 254 * NIX0 and NIX1. 255 */ 256 lbkid = 0; 257 if (rvu->hw->lbk_links > 1) 258 lbkid = vf & 0x1 ? 0 : 1; 259 260 /* Note that AF's VFs work in pairs and talk over consecutive 261 * loopback channels.Therefore if odd number of AF VFs are 262 * enabled then the last VF remains with no pair. 263 */ 264 pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); 265 pfvf->tx_chan_base = vf & 0x1 ? 266 NIX_CHAN_LBK_CHX(lbkid, vf - 1) : 267 NIX_CHAN_LBK_CHX(lbkid, vf + 1); 268 pfvf->rx_chan_cnt = 1; 269 pfvf->tx_chan_cnt = 1; 270 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 271 pfvf->rx_chan_base, false); 272 break; 273 } 274 275 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 276 * RVU PF/VF's MAC address. 277 */ 278 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 279 pfvf->rx_chan_base, pfvf->mac_addr); 280 281 /* Add this PF_FUNC to bcast pkt replication list */ 282 err = nix_update_bcast_mce_list(rvu, pcifunc, true); 283 if (err) { 284 dev_err(rvu->dev, 285 "Bcast list, failed to enable PF_FUNC 0x%x\n", 286 pcifunc); 287 return err; 288 } 289 290 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 291 nixlf, pfvf->rx_chan_base); 292 pfvf->maxlen = NIC_HW_MIN_FRS; 293 pfvf->minlen = NIC_HW_MIN_FRS; 294 295 return 0; 296 } 297 298 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 299 { 300 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 301 int err; 302 303 pfvf->maxlen = 0; 304 pfvf->minlen = 0; 305 pfvf->rxvlan = false; 306 307 /* Remove this PF_FUNC from bcast pkt replication list */ 308 err = nix_update_bcast_mce_list(rvu, pcifunc, false); 309 if (err) { 310 dev_err(rvu->dev, 311 "Bcast list, failed to disable PF_FUNC 0x%x\n", 312 pcifunc); 313 } 314 315 /* Free and disable any MCAM entries used by this NIX LF */ 316 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 317 } 318 319 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 320 struct nix_bp_cfg_req *req, 321 struct msg_rsp *rsp) 322 { 323 u16 pcifunc = req->hdr.pcifunc; 324 struct rvu_pfvf *pfvf; 325 int blkaddr, pf, type; 326 u16 chan_base, chan; 327 u64 cfg; 328 329 pf = rvu_get_pf(pcifunc); 330 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 331 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 332 return 0; 333 334 pfvf = rvu_get_pfvf(rvu, pcifunc); 335 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 336 337 chan_base = pfvf->rx_chan_base + req->chan_base; 338 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 339 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 340 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 341 cfg & ~BIT_ULL(16)); 342 } 343 return 0; 344 } 345 346 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 347 int type, int chan_id) 348 { 349 int bpid, blkaddr, lmac_chan_cnt; 350 struct rvu_hwinfo *hw = rvu->hw; 351 u16 cgx_bpid_cnt, lbk_bpid_cnt; 352 struct rvu_pfvf *pfvf; 353 u8 cgx_id, lmac_id; 354 u64 cfg; 355 356 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 357 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 358 lmac_chan_cnt = cfg & 0xFF; 359 360 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 361 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 362 363 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 364 365 /* Backpressure IDs range division 366 * CGX channles are mapped to (0 - 191) BPIDs 367 * LBK channles are mapped to (192 - 255) BPIDs 368 * SDP channles are mapped to (256 - 511) BPIDs 369 * 370 * Lmac channles and bpids mapped as follows 371 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 372 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 373 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 374 */ 375 switch (type) { 376 case NIX_INTF_TYPE_CGX: 377 if ((req->chan_base + req->chan_cnt) > 15) 378 return -EINVAL; 379 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 380 /* Assign bpid based on cgx, lmac and chan id */ 381 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 382 (lmac_id * lmac_chan_cnt) + req->chan_base; 383 384 if (req->bpid_per_chan) 385 bpid += chan_id; 386 if (bpid > cgx_bpid_cnt) 387 return -EINVAL; 388 break; 389 390 case NIX_INTF_TYPE_LBK: 391 if ((req->chan_base + req->chan_cnt) > 63) 392 return -EINVAL; 393 bpid = cgx_bpid_cnt + req->chan_base; 394 if (req->bpid_per_chan) 395 bpid += chan_id; 396 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 397 return -EINVAL; 398 break; 399 default: 400 return -EINVAL; 401 } 402 return bpid; 403 } 404 405 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 406 struct nix_bp_cfg_req *req, 407 struct nix_bp_cfg_rsp *rsp) 408 { 409 int blkaddr, pf, type, chan_id = 0; 410 u16 pcifunc = req->hdr.pcifunc; 411 struct rvu_pfvf *pfvf; 412 u16 chan_base, chan; 413 s16 bpid, bpid_base; 414 u64 cfg; 415 416 pf = rvu_get_pf(pcifunc); 417 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 418 419 /* Enable backpressure only for CGX mapped PFs and LBK interface */ 420 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 421 return 0; 422 423 pfvf = rvu_get_pfvf(rvu, pcifunc); 424 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 425 426 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 427 chan_base = pfvf->rx_chan_base + req->chan_base; 428 bpid = bpid_base; 429 430 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 431 if (bpid < 0) { 432 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 433 return -EINVAL; 434 } 435 436 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 437 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 438 cfg | (bpid & 0xFF) | BIT_ULL(16)); 439 chan_id++; 440 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 441 } 442 443 for (chan = 0; chan < req->chan_cnt; chan++) { 444 /* Map channel and bpid assign to it */ 445 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 446 (bpid_base & 0x3FF); 447 if (req->bpid_per_chan) 448 bpid_base++; 449 } 450 rsp->chan_cnt = req->chan_cnt; 451 452 return 0; 453 } 454 455 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 456 u64 format, bool v4, u64 *fidx) 457 { 458 struct nix_lso_format field = {0}; 459 460 /* IP's Length field */ 461 field.layer = NIX_TXLAYER_OL3; 462 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 463 field.offset = v4 ? 2 : 4; 464 field.sizem1 = 1; /* i.e 2 bytes */ 465 field.alg = NIX_LSOALG_ADD_PAYLEN; 466 rvu_write64(rvu, blkaddr, 467 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 468 *(u64 *)&field); 469 470 /* No ID field in IPv6 header */ 471 if (!v4) 472 return; 473 474 /* IP's ID field */ 475 field.layer = NIX_TXLAYER_OL3; 476 field.offset = 4; 477 field.sizem1 = 1; /* i.e 2 bytes */ 478 field.alg = NIX_LSOALG_ADD_SEGNUM; 479 rvu_write64(rvu, blkaddr, 480 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 481 *(u64 *)&field); 482 } 483 484 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 485 u64 format, u64 *fidx) 486 { 487 struct nix_lso_format field = {0}; 488 489 /* TCP's sequence number field */ 490 field.layer = NIX_TXLAYER_OL4; 491 field.offset = 4; 492 field.sizem1 = 3; /* i.e 4 bytes */ 493 field.alg = NIX_LSOALG_ADD_OFFSET; 494 rvu_write64(rvu, blkaddr, 495 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 496 *(u64 *)&field); 497 498 /* TCP's flags field */ 499 field.layer = NIX_TXLAYER_OL4; 500 field.offset = 12; 501 field.sizem1 = 1; /* 2 bytes */ 502 field.alg = NIX_LSOALG_TCP_FLAGS; 503 rvu_write64(rvu, blkaddr, 504 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 505 *(u64 *)&field); 506 } 507 508 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 509 { 510 u64 cfg, idx, fidx = 0; 511 512 /* Get max HW supported format indices */ 513 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 514 nix_hw->lso.total = cfg; 515 516 /* Enable LSO */ 517 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 518 /* For TSO, set first and middle segment flags to 519 * mask out PSH, RST & FIN flags in TCP packet 520 */ 521 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 522 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 523 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 524 525 /* Setup default static LSO formats 526 * 527 * Configure format fields for TCPv4 segmentation offload 528 */ 529 idx = NIX_LSO_FORMAT_IDX_TSOV4; 530 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 531 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 532 533 /* Set rest of the fields to NOP */ 534 for (; fidx < 8; fidx++) { 535 rvu_write64(rvu, blkaddr, 536 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 537 } 538 nix_hw->lso.in_use++; 539 540 /* Configure format fields for TCPv6 segmentation offload */ 541 idx = NIX_LSO_FORMAT_IDX_TSOV6; 542 fidx = 0; 543 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 544 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 545 546 /* Set rest of the fields to NOP */ 547 for (; fidx < 8; fidx++) { 548 rvu_write64(rvu, blkaddr, 549 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 550 } 551 nix_hw->lso.in_use++; 552 } 553 554 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 555 { 556 kfree(pfvf->rq_bmap); 557 kfree(pfvf->sq_bmap); 558 kfree(pfvf->cq_bmap); 559 if (pfvf->rq_ctx) 560 qmem_free(rvu->dev, pfvf->rq_ctx); 561 if (pfvf->sq_ctx) 562 qmem_free(rvu->dev, pfvf->sq_ctx); 563 if (pfvf->cq_ctx) 564 qmem_free(rvu->dev, pfvf->cq_ctx); 565 if (pfvf->rss_ctx) 566 qmem_free(rvu->dev, pfvf->rss_ctx); 567 if (pfvf->nix_qints_ctx) 568 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 569 if (pfvf->cq_ints_ctx) 570 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 571 572 pfvf->rq_bmap = NULL; 573 pfvf->cq_bmap = NULL; 574 pfvf->sq_bmap = NULL; 575 pfvf->rq_ctx = NULL; 576 pfvf->sq_ctx = NULL; 577 pfvf->cq_ctx = NULL; 578 pfvf->rss_ctx = NULL; 579 pfvf->nix_qints_ctx = NULL; 580 pfvf->cq_ints_ctx = NULL; 581 } 582 583 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 584 struct rvu_pfvf *pfvf, int nixlf, 585 int rss_sz, int rss_grps, int hwctx_size, 586 u64 way_mask) 587 { 588 int err, grp, num_indices; 589 590 /* RSS is not requested for this NIXLF */ 591 if (!rss_sz) 592 return 0; 593 num_indices = rss_sz * rss_grps; 594 595 /* Alloc NIX RSS HW context memory and config the base */ 596 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 597 if (err) 598 return err; 599 600 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 601 (u64)pfvf->rss_ctx->iova); 602 603 /* Config full RSS table size, enable RSS and caching */ 604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), 605 BIT_ULL(36) | BIT_ULL(4) | 606 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | 607 way_mask << 20); 608 /* Config RSS group offset and sizes */ 609 for (grp = 0; grp < rss_grps; grp++) 610 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 611 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 612 return 0; 613 } 614 615 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 616 struct nix_aq_inst_s *inst) 617 { 618 struct admin_queue *aq = block->aq; 619 struct nix_aq_res_s *result; 620 int timeout = 1000; 621 u64 reg, head; 622 623 result = (struct nix_aq_res_s *)aq->res->base; 624 625 /* Get current head pointer where to append this instruction */ 626 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 627 head = (reg >> 4) & AQ_PTR_MASK; 628 629 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 630 (void *)inst, aq->inst->entry_sz); 631 memset(result, 0, sizeof(*result)); 632 /* sync into memory */ 633 wmb(); 634 635 /* Ring the doorbell and wait for result */ 636 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 637 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 638 cpu_relax(); 639 udelay(1); 640 timeout--; 641 if (!timeout) 642 return -EBUSY; 643 } 644 645 if (result->compcode != NIX_AQ_COMP_GOOD) 646 /* TODO: Replace this with some error code */ 647 return -EBUSY; 648 649 return 0; 650 } 651 652 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 653 struct nix_aq_enq_req *req, 654 struct nix_aq_enq_rsp *rsp) 655 { 656 struct rvu_hwinfo *hw = rvu->hw; 657 u16 pcifunc = req->hdr.pcifunc; 658 int nixlf, blkaddr, rc = 0; 659 struct nix_aq_inst_s inst; 660 struct rvu_block *block; 661 struct admin_queue *aq; 662 struct rvu_pfvf *pfvf; 663 void *ctx, *mask; 664 bool ena; 665 u64 cfg; 666 667 blkaddr = nix_hw->blkaddr; 668 block = &hw->block[blkaddr]; 669 aq = block->aq; 670 if (!aq) { 671 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 672 return NIX_AF_ERR_AQ_ENQUEUE; 673 } 674 675 pfvf = rvu_get_pfvf(rvu, pcifunc); 676 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 677 678 /* Skip NIXLF check for broadcast MCE entry init */ 679 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { 680 if (!pfvf->nixlf || nixlf < 0) 681 return NIX_AF_ERR_AF_LF_INVALID; 682 } 683 684 switch (req->ctype) { 685 case NIX_AQ_CTYPE_RQ: 686 /* Check if index exceeds max no of queues */ 687 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 688 rc = NIX_AF_ERR_AQ_ENQUEUE; 689 break; 690 case NIX_AQ_CTYPE_SQ: 691 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 692 rc = NIX_AF_ERR_AQ_ENQUEUE; 693 break; 694 case NIX_AQ_CTYPE_CQ: 695 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 696 rc = NIX_AF_ERR_AQ_ENQUEUE; 697 break; 698 case NIX_AQ_CTYPE_RSS: 699 /* Check if RSS is enabled and qidx is within range */ 700 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 701 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 702 (req->qidx >= (256UL << (cfg & 0xF)))) 703 rc = NIX_AF_ERR_AQ_ENQUEUE; 704 break; 705 case NIX_AQ_CTYPE_MCE: 706 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 707 708 /* Check if index exceeds MCE list length */ 709 if (!nix_hw->mcast.mce_ctx || 710 (req->qidx >= (256UL << (cfg & 0xF)))) 711 rc = NIX_AF_ERR_AQ_ENQUEUE; 712 713 /* Adding multicast lists for requests from PF/VFs is not 714 * yet supported, so ignore this. 715 */ 716 if (rsp) 717 rc = NIX_AF_ERR_AQ_ENQUEUE; 718 break; 719 default: 720 rc = NIX_AF_ERR_AQ_ENQUEUE; 721 } 722 723 if (rc) 724 return rc; 725 726 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 727 if (req->ctype == NIX_AQ_CTYPE_SQ && 728 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 729 (req->op == NIX_AQ_INSTOP_WRITE && 730 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 731 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 732 pcifunc, req->sq.smq)) 733 return NIX_AF_ERR_AQ_ENQUEUE; 734 } 735 736 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 737 inst.lf = nixlf; 738 inst.cindex = req->qidx; 739 inst.ctype = req->ctype; 740 inst.op = req->op; 741 /* Currently we are not supporting enqueuing multiple instructions, 742 * so always choose first entry in result memory. 743 */ 744 inst.res_addr = (u64)aq->res->iova; 745 746 /* Hardware uses same aq->res->base for updating result of 747 * previous instruction hence wait here till it is done. 748 */ 749 spin_lock(&aq->lock); 750 751 /* Clean result + context memory */ 752 memset(aq->res->base, 0, aq->res->entry_sz); 753 /* Context needs to be written at RES_ADDR + 128 */ 754 ctx = aq->res->base + 128; 755 /* Mask needs to be written at RES_ADDR + 256 */ 756 mask = aq->res->base + 256; 757 758 switch (req->op) { 759 case NIX_AQ_INSTOP_WRITE: 760 if (req->ctype == NIX_AQ_CTYPE_RQ) 761 memcpy(mask, &req->rq_mask, 762 sizeof(struct nix_rq_ctx_s)); 763 else if (req->ctype == NIX_AQ_CTYPE_SQ) 764 memcpy(mask, &req->sq_mask, 765 sizeof(struct nix_sq_ctx_s)); 766 else if (req->ctype == NIX_AQ_CTYPE_CQ) 767 memcpy(mask, &req->cq_mask, 768 sizeof(struct nix_cq_ctx_s)); 769 else if (req->ctype == NIX_AQ_CTYPE_RSS) 770 memcpy(mask, &req->rss_mask, 771 sizeof(struct nix_rsse_s)); 772 else if (req->ctype == NIX_AQ_CTYPE_MCE) 773 memcpy(mask, &req->mce_mask, 774 sizeof(struct nix_rx_mce_s)); 775 fallthrough; 776 case NIX_AQ_INSTOP_INIT: 777 if (req->ctype == NIX_AQ_CTYPE_RQ) 778 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 779 else if (req->ctype == NIX_AQ_CTYPE_SQ) 780 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 781 else if (req->ctype == NIX_AQ_CTYPE_CQ) 782 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 783 else if (req->ctype == NIX_AQ_CTYPE_RSS) 784 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 785 else if (req->ctype == NIX_AQ_CTYPE_MCE) 786 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 787 break; 788 case NIX_AQ_INSTOP_NOP: 789 case NIX_AQ_INSTOP_READ: 790 case NIX_AQ_INSTOP_LOCK: 791 case NIX_AQ_INSTOP_UNLOCK: 792 break; 793 default: 794 rc = NIX_AF_ERR_AQ_ENQUEUE; 795 spin_unlock(&aq->lock); 796 return rc; 797 } 798 799 /* Submit the instruction to AQ */ 800 rc = nix_aq_enqueue_wait(rvu, block, &inst); 801 if (rc) { 802 spin_unlock(&aq->lock); 803 return rc; 804 } 805 806 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 807 if (req->op == NIX_AQ_INSTOP_INIT) { 808 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 809 __set_bit(req->qidx, pfvf->rq_bmap); 810 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 811 __set_bit(req->qidx, pfvf->sq_bmap); 812 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 813 __set_bit(req->qidx, pfvf->cq_bmap); 814 } 815 816 if (req->op == NIX_AQ_INSTOP_WRITE) { 817 if (req->ctype == NIX_AQ_CTYPE_RQ) { 818 ena = (req->rq.ena & req->rq_mask.ena) | 819 (test_bit(req->qidx, pfvf->rq_bmap) & 820 ~req->rq_mask.ena); 821 if (ena) 822 __set_bit(req->qidx, pfvf->rq_bmap); 823 else 824 __clear_bit(req->qidx, pfvf->rq_bmap); 825 } 826 if (req->ctype == NIX_AQ_CTYPE_SQ) { 827 ena = (req->rq.ena & req->sq_mask.ena) | 828 (test_bit(req->qidx, pfvf->sq_bmap) & 829 ~req->sq_mask.ena); 830 if (ena) 831 __set_bit(req->qidx, pfvf->sq_bmap); 832 else 833 __clear_bit(req->qidx, pfvf->sq_bmap); 834 } 835 if (req->ctype == NIX_AQ_CTYPE_CQ) { 836 ena = (req->rq.ena & req->cq_mask.ena) | 837 (test_bit(req->qidx, pfvf->cq_bmap) & 838 ~req->cq_mask.ena); 839 if (ena) 840 __set_bit(req->qidx, pfvf->cq_bmap); 841 else 842 __clear_bit(req->qidx, pfvf->cq_bmap); 843 } 844 } 845 846 if (rsp) { 847 /* Copy read context into mailbox */ 848 if (req->op == NIX_AQ_INSTOP_READ) { 849 if (req->ctype == NIX_AQ_CTYPE_RQ) 850 memcpy(&rsp->rq, ctx, 851 sizeof(struct nix_rq_ctx_s)); 852 else if (req->ctype == NIX_AQ_CTYPE_SQ) 853 memcpy(&rsp->sq, ctx, 854 sizeof(struct nix_sq_ctx_s)); 855 else if (req->ctype == NIX_AQ_CTYPE_CQ) 856 memcpy(&rsp->cq, ctx, 857 sizeof(struct nix_cq_ctx_s)); 858 else if (req->ctype == NIX_AQ_CTYPE_RSS) 859 memcpy(&rsp->rss, ctx, 860 sizeof(struct nix_rsse_s)); 861 else if (req->ctype == NIX_AQ_CTYPE_MCE) 862 memcpy(&rsp->mce, ctx, 863 sizeof(struct nix_rx_mce_s)); 864 } 865 } 866 867 spin_unlock(&aq->lock); 868 return 0; 869 } 870 871 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 872 struct nix_aq_enq_rsp *rsp) 873 { 874 struct nix_hw *nix_hw; 875 int blkaddr; 876 877 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 878 if (blkaddr < 0) 879 return NIX_AF_ERR_AF_LF_INVALID; 880 881 nix_hw = get_nix_hw(rvu->hw, blkaddr); 882 if (!nix_hw) 883 return -EINVAL; 884 885 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 886 } 887 888 static const char *nix_get_ctx_name(int ctype) 889 { 890 switch (ctype) { 891 case NIX_AQ_CTYPE_CQ: 892 return "CQ"; 893 case NIX_AQ_CTYPE_SQ: 894 return "SQ"; 895 case NIX_AQ_CTYPE_RQ: 896 return "RQ"; 897 case NIX_AQ_CTYPE_RSS: 898 return "RSS"; 899 } 900 return ""; 901 } 902 903 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 904 { 905 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 906 struct nix_aq_enq_req aq_req; 907 unsigned long *bmap; 908 int qidx, q_cnt = 0; 909 int err = 0, rc; 910 911 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 912 return NIX_AF_ERR_AQ_ENQUEUE; 913 914 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 915 aq_req.hdr.pcifunc = req->hdr.pcifunc; 916 917 if (req->ctype == NIX_AQ_CTYPE_CQ) { 918 aq_req.cq.ena = 0; 919 aq_req.cq_mask.ena = 1; 920 aq_req.cq.bp_ena = 0; 921 aq_req.cq_mask.bp_ena = 1; 922 q_cnt = pfvf->cq_ctx->qsize; 923 bmap = pfvf->cq_bmap; 924 } 925 if (req->ctype == NIX_AQ_CTYPE_SQ) { 926 aq_req.sq.ena = 0; 927 aq_req.sq_mask.ena = 1; 928 q_cnt = pfvf->sq_ctx->qsize; 929 bmap = pfvf->sq_bmap; 930 } 931 if (req->ctype == NIX_AQ_CTYPE_RQ) { 932 aq_req.rq.ena = 0; 933 aq_req.rq_mask.ena = 1; 934 q_cnt = pfvf->rq_ctx->qsize; 935 bmap = pfvf->rq_bmap; 936 } 937 938 aq_req.ctype = req->ctype; 939 aq_req.op = NIX_AQ_INSTOP_WRITE; 940 941 for (qidx = 0; qidx < q_cnt; qidx++) { 942 if (!test_bit(qidx, bmap)) 943 continue; 944 aq_req.qidx = qidx; 945 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 946 if (rc) { 947 err = rc; 948 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 949 nix_get_ctx_name(req->ctype), qidx); 950 } 951 } 952 953 return err; 954 } 955 956 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 957 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 958 { 959 struct nix_aq_enq_req lock_ctx_req; 960 int err; 961 962 if (req->op != NIX_AQ_INSTOP_INIT) 963 return 0; 964 965 if (req->ctype == NIX_AQ_CTYPE_MCE || 966 req->ctype == NIX_AQ_CTYPE_DYNO) 967 return 0; 968 969 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 970 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 971 lock_ctx_req.ctype = req->ctype; 972 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 973 lock_ctx_req.qidx = req->qidx; 974 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 975 if (err) 976 dev_err(rvu->dev, 977 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 978 req->hdr.pcifunc, 979 nix_get_ctx_name(req->ctype), req->qidx); 980 return err; 981 } 982 983 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 984 struct nix_aq_enq_req *req, 985 struct nix_aq_enq_rsp *rsp) 986 { 987 int err; 988 989 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 990 if (!err) 991 err = nix_lf_hwctx_lockdown(rvu, req); 992 return err; 993 } 994 #else 995 996 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 997 struct nix_aq_enq_req *req, 998 struct nix_aq_enq_rsp *rsp) 999 { 1000 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1001 } 1002 #endif 1003 1004 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1005 struct hwctx_disable_req *req, 1006 struct msg_rsp *rsp) 1007 { 1008 return nix_lf_hwctx_disable(rvu, req); 1009 } 1010 1011 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1012 struct nix_lf_alloc_req *req, 1013 struct nix_lf_alloc_rsp *rsp) 1014 { 1015 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1016 struct rvu_hwinfo *hw = rvu->hw; 1017 u16 pcifunc = req->hdr.pcifunc; 1018 struct rvu_block *block; 1019 struct rvu_pfvf *pfvf; 1020 u64 cfg, ctx_cfg; 1021 int blkaddr; 1022 1023 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1024 return NIX_AF_ERR_PARAM; 1025 1026 if (req->way_mask) 1027 req->way_mask &= 0xFFFF; 1028 1029 pfvf = rvu_get_pfvf(rvu, pcifunc); 1030 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1031 if (!pfvf->nixlf || blkaddr < 0) 1032 return NIX_AF_ERR_AF_LF_INVALID; 1033 1034 block = &hw->block[blkaddr]; 1035 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1036 if (nixlf < 0) 1037 return NIX_AF_ERR_AF_LF_INVALID; 1038 1039 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1040 if (req->npa_func) { 1041 /* If default, use 'this' NIXLF's PFFUNC */ 1042 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1043 req->npa_func = pcifunc; 1044 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1045 return NIX_AF_INVAL_NPA_PF_FUNC; 1046 } 1047 1048 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1049 if (req->sso_func) { 1050 /* If default, use 'this' NIXLF's PFFUNC */ 1051 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1052 req->sso_func = pcifunc; 1053 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1054 return NIX_AF_INVAL_SSO_PF_FUNC; 1055 } 1056 1057 /* If RSS is being enabled, check if requested config is valid. 1058 * RSS table size should be power of two, otherwise 1059 * RSS_GRP::OFFSET + adder might go beyond that group or 1060 * won't be able to use entire table. 1061 */ 1062 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1063 !is_power_of_2(req->rss_sz))) 1064 return NIX_AF_ERR_RSS_SIZE_INVALID; 1065 1066 if (req->rss_sz && 1067 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1068 return NIX_AF_ERR_RSS_GRPS_INVALID; 1069 1070 /* Reset this NIX LF */ 1071 err = rvu_lf_reset(rvu, block, nixlf); 1072 if (err) { 1073 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1074 block->addr - BLKADDR_NIX0, nixlf); 1075 return NIX_AF_ERR_LF_RESET; 1076 } 1077 1078 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1079 1080 /* Alloc NIX RQ HW context memory and config the base */ 1081 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1082 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1083 if (err) 1084 goto free_mem; 1085 1086 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1087 if (!pfvf->rq_bmap) 1088 goto free_mem; 1089 1090 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1091 (u64)pfvf->rq_ctx->iova); 1092 1093 /* Set caching and queue count in HW */ 1094 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1095 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1096 1097 /* Alloc NIX SQ HW context memory and config the base */ 1098 hwctx_size = 1UL << (ctx_cfg & 0xF); 1099 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1100 if (err) 1101 goto free_mem; 1102 1103 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1104 if (!pfvf->sq_bmap) 1105 goto free_mem; 1106 1107 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1108 (u64)pfvf->sq_ctx->iova); 1109 1110 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1111 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1112 1113 /* Alloc NIX CQ HW context memory and config the base */ 1114 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1115 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1116 if (err) 1117 goto free_mem; 1118 1119 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1120 if (!pfvf->cq_bmap) 1121 goto free_mem; 1122 1123 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1124 (u64)pfvf->cq_ctx->iova); 1125 1126 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1127 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1128 1129 /* Initialize receive side scaling (RSS) */ 1130 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1131 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1132 req->rss_grps, hwctx_size, req->way_mask); 1133 if (err) 1134 goto free_mem; 1135 1136 /* Alloc memory for CQINT's HW contexts */ 1137 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1138 qints = (cfg >> 24) & 0xFFF; 1139 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1140 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1141 if (err) 1142 goto free_mem; 1143 1144 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1145 (u64)pfvf->cq_ints_ctx->iova); 1146 1147 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1148 BIT_ULL(36) | req->way_mask << 20); 1149 1150 /* Alloc memory for QINT's HW contexts */ 1151 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1152 qints = (cfg >> 12) & 0xFFF; 1153 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1154 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1155 if (err) 1156 goto free_mem; 1157 1158 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1159 (u64)pfvf->nix_qints_ctx->iova); 1160 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1161 BIT_ULL(36) | req->way_mask << 20); 1162 1163 /* Setup VLANX TPID's. 1164 * Use VLAN1 for 802.1Q 1165 * and VLAN0 for 802.1AD. 1166 */ 1167 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1168 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1169 1170 /* Enable LMTST for this NIX LF */ 1171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1172 1173 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1174 if (req->npa_func) 1175 cfg = req->npa_func; 1176 if (req->sso_func) 1177 cfg |= (u64)req->sso_func << 16; 1178 1179 cfg |= (u64)req->xqe_sz << 33; 1180 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1181 1182 /* Config Rx pkt length, csum checks and apad enable / disable */ 1183 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1184 1185 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1186 err = nix_interface_init(rvu, pcifunc, intf, nixlf); 1187 if (err) 1188 goto free_mem; 1189 1190 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1191 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1192 1193 goto exit; 1194 1195 free_mem: 1196 nix_ctx_free(rvu, pfvf); 1197 rc = -ENOMEM; 1198 1199 exit: 1200 /* Set macaddr of this PF/VF */ 1201 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1202 1203 /* set SQB size info */ 1204 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1205 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1206 rsp->rx_chan_base = pfvf->rx_chan_base; 1207 rsp->tx_chan_base = pfvf->tx_chan_base; 1208 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1209 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1210 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1211 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1212 /* Get HW supported stat count */ 1213 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1214 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1215 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1216 /* Get count of CQ IRQs and error IRQs supported per LF */ 1217 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1218 rsp->qints = ((cfg >> 12) & 0xFFF); 1219 rsp->cints = ((cfg >> 24) & 0xFFF); 1220 rsp->cgx_links = hw->cgx_links; 1221 rsp->lbk_links = hw->lbk_links; 1222 rsp->sdp_links = hw->sdp_links; 1223 1224 return rc; 1225 } 1226 1227 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, 1228 struct msg_rsp *rsp) 1229 { 1230 struct rvu_hwinfo *hw = rvu->hw; 1231 u16 pcifunc = req->hdr.pcifunc; 1232 struct rvu_block *block; 1233 int blkaddr, nixlf, err; 1234 struct rvu_pfvf *pfvf; 1235 1236 pfvf = rvu_get_pfvf(rvu, pcifunc); 1237 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1238 if (!pfvf->nixlf || blkaddr < 0) 1239 return NIX_AF_ERR_AF_LF_INVALID; 1240 1241 block = &hw->block[blkaddr]; 1242 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1243 if (nixlf < 0) 1244 return NIX_AF_ERR_AF_LF_INVALID; 1245 1246 nix_interface_deinit(rvu, pcifunc, nixlf); 1247 1248 /* Reset this NIX LF */ 1249 err = rvu_lf_reset(rvu, block, nixlf); 1250 if (err) { 1251 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1252 block->addr - BLKADDR_NIX0, nixlf); 1253 return NIX_AF_ERR_LF_RESET; 1254 } 1255 1256 nix_ctx_free(rvu, pfvf); 1257 1258 return 0; 1259 } 1260 1261 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1262 struct nix_mark_format_cfg *req, 1263 struct nix_mark_format_cfg_rsp *rsp) 1264 { 1265 u16 pcifunc = req->hdr.pcifunc; 1266 struct nix_hw *nix_hw; 1267 struct rvu_pfvf *pfvf; 1268 int blkaddr, rc; 1269 u32 cfg; 1270 1271 pfvf = rvu_get_pfvf(rvu, pcifunc); 1272 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1273 if (!pfvf->nixlf || blkaddr < 0) 1274 return NIX_AF_ERR_AF_LF_INVALID; 1275 1276 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1277 if (!nix_hw) 1278 return -EINVAL; 1279 1280 cfg = (((u32)req->offset & 0x7) << 16) | 1281 (((u32)req->y_mask & 0xF) << 12) | 1282 (((u32)req->y_val & 0xF) << 8) | 1283 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1284 1285 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1286 if (rc < 0) { 1287 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1288 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1289 return NIX_AF_ERR_MARK_CFG_FAIL; 1290 } 1291 1292 rsp->mark_format_idx = rc; 1293 return 0; 1294 } 1295 1296 /* Disable shaping of pkts by a scheduler queue 1297 * at a given scheduler level. 1298 */ 1299 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1300 int lvl, int schq) 1301 { 1302 u64 cir_reg = 0, pir_reg = 0; 1303 u64 cfg; 1304 1305 switch (lvl) { 1306 case NIX_TXSCH_LVL_TL1: 1307 cir_reg = NIX_AF_TL1X_CIR(schq); 1308 pir_reg = 0; /* PIR not available at TL1 */ 1309 break; 1310 case NIX_TXSCH_LVL_TL2: 1311 cir_reg = NIX_AF_TL2X_CIR(schq); 1312 pir_reg = NIX_AF_TL2X_PIR(schq); 1313 break; 1314 case NIX_TXSCH_LVL_TL3: 1315 cir_reg = NIX_AF_TL3X_CIR(schq); 1316 pir_reg = NIX_AF_TL3X_PIR(schq); 1317 break; 1318 case NIX_TXSCH_LVL_TL4: 1319 cir_reg = NIX_AF_TL4X_CIR(schq); 1320 pir_reg = NIX_AF_TL4X_PIR(schq); 1321 break; 1322 } 1323 1324 if (!cir_reg) 1325 return; 1326 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1327 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1328 1329 if (!pir_reg) 1330 return; 1331 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1332 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1333 } 1334 1335 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1336 int lvl, int schq) 1337 { 1338 struct rvu_hwinfo *hw = rvu->hw; 1339 int link; 1340 1341 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1342 return; 1343 1344 /* Reset TL4's SDP link config */ 1345 if (lvl == NIX_TXSCH_LVL_TL4) 1346 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1347 1348 if (lvl != NIX_TXSCH_LVL_TL2) 1349 return; 1350 1351 /* Reset TL2's CGX or LBK link config */ 1352 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1353 rvu_write64(rvu, blkaddr, 1354 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1355 } 1356 1357 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1358 { 1359 struct rvu_hwinfo *hw = rvu->hw; 1360 int pf = rvu_get_pf(pcifunc); 1361 u8 cgx_id = 0, lmac_id = 0; 1362 1363 if (is_afvf(pcifunc)) {/* LBK links */ 1364 return hw->cgx_links; 1365 } else if (is_pf_cgxmapped(rvu, pf)) { 1366 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1367 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1368 } 1369 1370 /* SDP link */ 1371 return hw->cgx_links + hw->lbk_links; 1372 } 1373 1374 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1375 int link, int *start, int *end) 1376 { 1377 struct rvu_hwinfo *hw = rvu->hw; 1378 int pf = rvu_get_pf(pcifunc); 1379 1380 if (is_afvf(pcifunc)) { /* LBK links */ 1381 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1382 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1383 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1384 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1385 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1386 } else { /* SDP link */ 1387 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1388 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1389 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1390 } 1391 } 1392 1393 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1394 struct nix_hw *nix_hw, 1395 struct nix_txsch_alloc_req *req) 1396 { 1397 struct rvu_hwinfo *hw = rvu->hw; 1398 int schq, req_schq, free_cnt; 1399 struct nix_txsch *txsch; 1400 int link, start, end; 1401 1402 txsch = &nix_hw->txsch[lvl]; 1403 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1404 1405 if (!req_schq) 1406 return 0; 1407 1408 link = nix_get_tx_link(rvu, pcifunc); 1409 1410 /* For traffic aggregating scheduler level, one queue is enough */ 1411 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1412 if (req_schq != 1) 1413 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1414 return 0; 1415 } 1416 1417 /* Get free SCHQ count and check if request can be accomodated */ 1418 if (hw->cap.nix_fixed_txschq_mapping) { 1419 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1420 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1421 if (end <= txsch->schq.max && schq < end && 1422 !test_bit(schq, txsch->schq.bmap)) 1423 free_cnt = 1; 1424 else 1425 free_cnt = 0; 1426 } else { 1427 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1428 } 1429 1430 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) 1431 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1432 1433 /* If contiguous queues are needed, check for availability */ 1434 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1435 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1436 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1437 1438 return 0; 1439 } 1440 1441 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1442 struct nix_txsch_alloc_rsp *rsp, 1443 int lvl, int start, int end) 1444 { 1445 struct rvu_hwinfo *hw = rvu->hw; 1446 u16 pcifunc = rsp->hdr.pcifunc; 1447 int idx, schq; 1448 1449 /* For traffic aggregating levels, queue alloc is based 1450 * on transmit link to which PF_FUNC is mapped to. 1451 */ 1452 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1453 /* A single TL queue is allocated */ 1454 if (rsp->schq_contig[lvl]) { 1455 rsp->schq_contig[lvl] = 1; 1456 rsp->schq_contig_list[lvl][0] = start; 1457 } 1458 1459 /* Both contig and non-contig reqs doesn't make sense here */ 1460 if (rsp->schq_contig[lvl]) 1461 rsp->schq[lvl] = 0; 1462 1463 if (rsp->schq[lvl]) { 1464 rsp->schq[lvl] = 1; 1465 rsp->schq_list[lvl][0] = start; 1466 } 1467 return; 1468 } 1469 1470 /* Adjust the queue request count if HW supports 1471 * only one queue per level configuration. 1472 */ 1473 if (hw->cap.nix_fixed_txschq_mapping) { 1474 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1475 schq = start + idx; 1476 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1477 rsp->schq_contig[lvl] = 0; 1478 rsp->schq[lvl] = 0; 1479 return; 1480 } 1481 1482 if (rsp->schq_contig[lvl]) { 1483 rsp->schq_contig[lvl] = 1; 1484 set_bit(schq, txsch->schq.bmap); 1485 rsp->schq_contig_list[lvl][0] = schq; 1486 rsp->schq[lvl] = 0; 1487 } else if (rsp->schq[lvl]) { 1488 rsp->schq[lvl] = 1; 1489 set_bit(schq, txsch->schq.bmap); 1490 rsp->schq_list[lvl][0] = schq; 1491 } 1492 return; 1493 } 1494 1495 /* Allocate contiguous queue indices requesty first */ 1496 if (rsp->schq_contig[lvl]) { 1497 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 1498 txsch->schq.max, start, 1499 rsp->schq_contig[lvl], 0); 1500 if (schq >= end) 1501 rsp->schq_contig[lvl] = 0; 1502 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 1503 set_bit(schq, txsch->schq.bmap); 1504 rsp->schq_contig_list[lvl][idx] = schq; 1505 schq++; 1506 } 1507 } 1508 1509 /* Allocate non-contiguous queue indices */ 1510 if (rsp->schq[lvl]) { 1511 idx = 0; 1512 for (schq = start; schq < end; schq++) { 1513 if (!test_bit(schq, txsch->schq.bmap)) { 1514 set_bit(schq, txsch->schq.bmap); 1515 rsp->schq_list[lvl][idx++] = schq; 1516 } 1517 if (idx == rsp->schq[lvl]) 1518 break; 1519 } 1520 /* Update how many were allocated */ 1521 rsp->schq[lvl] = idx; 1522 } 1523 } 1524 1525 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 1526 struct nix_txsch_alloc_req *req, 1527 struct nix_txsch_alloc_rsp *rsp) 1528 { 1529 struct rvu_hwinfo *hw = rvu->hw; 1530 u16 pcifunc = req->hdr.pcifunc; 1531 int link, blkaddr, rc = 0; 1532 int lvl, idx, start, end; 1533 struct nix_txsch *txsch; 1534 struct rvu_pfvf *pfvf; 1535 struct nix_hw *nix_hw; 1536 u32 *pfvf_map; 1537 u16 schq; 1538 1539 pfvf = rvu_get_pfvf(rvu, pcifunc); 1540 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1541 if (!pfvf->nixlf || blkaddr < 0) 1542 return NIX_AF_ERR_AF_LF_INVALID; 1543 1544 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1545 if (!nix_hw) 1546 return -EINVAL; 1547 1548 mutex_lock(&rvu->rsrc_lock); 1549 1550 /* Check if request is valid as per HW capabilities 1551 * and can be accomodated. 1552 */ 1553 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1554 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1555 if (rc) 1556 goto err; 1557 } 1558 1559 /* Allocate requested Tx scheduler queues */ 1560 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1561 txsch = &nix_hw->txsch[lvl]; 1562 pfvf_map = txsch->pfvf_map; 1563 1564 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1565 continue; 1566 1567 rsp->schq[lvl] = req->schq[lvl]; 1568 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1569 1570 link = nix_get_tx_link(rvu, pcifunc); 1571 1572 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1573 start = link; 1574 end = link; 1575 } else if (hw->cap.nix_fixed_txschq_mapping) { 1576 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1577 } else { 1578 start = 0; 1579 end = txsch->schq.max; 1580 } 1581 1582 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1583 1584 /* Reset queue config */ 1585 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1586 schq = rsp->schq_contig_list[lvl][idx]; 1587 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1588 NIX_TXSCHQ_CFG_DONE)) 1589 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1590 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1591 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1592 } 1593 1594 for (idx = 0; idx < req->schq[lvl]; idx++) { 1595 schq = rsp->schq_list[lvl][idx]; 1596 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1597 NIX_TXSCHQ_CFG_DONE)) 1598 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1599 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1600 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1601 } 1602 } 1603 1604 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1605 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1606 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1607 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1608 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1609 goto exit; 1610 err: 1611 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1612 exit: 1613 mutex_unlock(&rvu->rsrc_lock); 1614 return rc; 1615 } 1616 1617 static void nix_smq_flush(struct rvu *rvu, int blkaddr, 1618 int smq, u16 pcifunc, int nixlf) 1619 { 1620 int pf = rvu_get_pf(pcifunc); 1621 u8 cgx_id = 0, lmac_id = 0; 1622 int err, restore_tx_en = 0; 1623 u64 cfg; 1624 1625 /* enable cgx tx if disabled */ 1626 if (is_pf_cgxmapped(rvu, pf)) { 1627 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1628 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1629 lmac_id, true); 1630 } 1631 1632 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 1633 /* Do SMQ flush and set enqueue xoff */ 1634 cfg |= BIT_ULL(50) | BIT_ULL(49); 1635 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 1636 1637 /* Disable backpressure from physical link, 1638 * otherwise SMQ flush may stall. 1639 */ 1640 rvu_cgx_enadis_rx_bp(rvu, pf, false); 1641 1642 /* Wait for flush to complete */ 1643 err = rvu_poll_reg(rvu, blkaddr, 1644 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 1645 if (err) 1646 dev_err(rvu->dev, 1647 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); 1648 1649 rvu_cgx_enadis_rx_bp(rvu, pf, true); 1650 /* restore cgx tx state */ 1651 if (restore_tx_en) 1652 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 1653 } 1654 1655 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 1656 { 1657 int blkaddr, nixlf, lvl, schq, err; 1658 struct rvu_hwinfo *hw = rvu->hw; 1659 struct nix_txsch *txsch; 1660 struct nix_hw *nix_hw; 1661 1662 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1663 if (blkaddr < 0) 1664 return NIX_AF_ERR_AF_LF_INVALID; 1665 1666 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1667 if (!nix_hw) 1668 return -EINVAL; 1669 1670 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1671 if (nixlf < 0) 1672 return NIX_AF_ERR_AF_LF_INVALID; 1673 1674 /* Disable TL2/3 queue links before SMQ flush*/ 1675 mutex_lock(&rvu->rsrc_lock); 1676 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1677 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) 1678 continue; 1679 1680 txsch = &nix_hw->txsch[lvl]; 1681 for (schq = 0; schq < txsch->schq.max; schq++) { 1682 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1683 continue; 1684 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1685 } 1686 } 1687 1688 /* Flush SMQs */ 1689 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 1690 for (schq = 0; schq < txsch->schq.max; schq++) { 1691 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1692 continue; 1693 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1694 } 1695 1696 /* Now free scheduler queues to free pool */ 1697 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1698 /* TLs above aggregation level are shared across all PF 1699 * and it's VFs, hence skip freeing them. 1700 */ 1701 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1702 continue; 1703 1704 txsch = &nix_hw->txsch[lvl]; 1705 for (schq = 0; schq < txsch->schq.max; schq++) { 1706 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1707 continue; 1708 rvu_free_rsrc(&txsch->schq, schq); 1709 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1710 } 1711 } 1712 mutex_unlock(&rvu->rsrc_lock); 1713 1714 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 1715 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 1716 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 1717 if (err) 1718 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 1719 1720 return 0; 1721 } 1722 1723 static int nix_txschq_free_one(struct rvu *rvu, 1724 struct nix_txsch_free_req *req) 1725 { 1726 struct rvu_hwinfo *hw = rvu->hw; 1727 u16 pcifunc = req->hdr.pcifunc; 1728 int lvl, schq, nixlf, blkaddr; 1729 struct nix_txsch *txsch; 1730 struct nix_hw *nix_hw; 1731 u32 *pfvf_map; 1732 1733 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1734 if (blkaddr < 0) 1735 return NIX_AF_ERR_AF_LF_INVALID; 1736 1737 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1738 if (!nix_hw) 1739 return -EINVAL; 1740 1741 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1742 if (nixlf < 0) 1743 return NIX_AF_ERR_AF_LF_INVALID; 1744 1745 lvl = req->schq_lvl; 1746 schq = req->schq; 1747 txsch = &nix_hw->txsch[lvl]; 1748 1749 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 1750 return 0; 1751 1752 pfvf_map = txsch->pfvf_map; 1753 mutex_lock(&rvu->rsrc_lock); 1754 1755 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 1756 mutex_unlock(&rvu->rsrc_lock); 1757 goto err; 1758 } 1759 1760 /* Flush if it is a SMQ. Onus of disabling 1761 * TL2/3 queue links before SMQ flush is on user 1762 */ 1763 if (lvl == NIX_TXSCH_LVL_SMQ) 1764 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1765 1766 /* Free the resource */ 1767 rvu_free_rsrc(&txsch->schq, schq); 1768 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1769 mutex_unlock(&rvu->rsrc_lock); 1770 return 0; 1771 err: 1772 return NIX_AF_ERR_TLX_INVALID; 1773 } 1774 1775 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 1776 struct nix_txsch_free_req *req, 1777 struct msg_rsp *rsp) 1778 { 1779 if (req->flags & TXSCHQ_FREE_ALL) 1780 return nix_txschq_free(rvu, req->hdr.pcifunc); 1781 else 1782 return nix_txschq_free_one(rvu, req); 1783 } 1784 1785 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 1786 int lvl, u64 reg, u64 regval) 1787 { 1788 u64 regbase = reg & 0xFFFF; 1789 u16 schq, parent; 1790 1791 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 1792 return false; 1793 1794 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1795 /* Check if this schq belongs to this PF/VF or not */ 1796 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 1797 return false; 1798 1799 parent = (regval >> 16) & 0x1FF; 1800 /* Validate MDQ's TL4 parent */ 1801 if (regbase == NIX_AF_MDQX_PARENT(0) && 1802 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 1803 return false; 1804 1805 /* Validate TL4's TL3 parent */ 1806 if (regbase == NIX_AF_TL4X_PARENT(0) && 1807 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 1808 return false; 1809 1810 /* Validate TL3's TL2 parent */ 1811 if (regbase == NIX_AF_TL3X_PARENT(0) && 1812 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 1813 return false; 1814 1815 /* Validate TL2's TL1 parent */ 1816 if (regbase == NIX_AF_TL2X_PARENT(0) && 1817 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 1818 return false; 1819 1820 return true; 1821 } 1822 1823 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 1824 { 1825 u64 regbase; 1826 1827 if (hw->cap.nix_shaping) 1828 return true; 1829 1830 /* If shaping and coloring is not supported, then 1831 * *_CIR and *_PIR registers should not be configured. 1832 */ 1833 regbase = reg & 0xFFFF; 1834 1835 switch (lvl) { 1836 case NIX_TXSCH_LVL_TL1: 1837 if (regbase == NIX_AF_TL1X_CIR(0)) 1838 return false; 1839 break; 1840 case NIX_TXSCH_LVL_TL2: 1841 if (regbase == NIX_AF_TL2X_CIR(0) || 1842 regbase == NIX_AF_TL2X_PIR(0)) 1843 return false; 1844 break; 1845 case NIX_TXSCH_LVL_TL3: 1846 if (regbase == NIX_AF_TL3X_CIR(0) || 1847 regbase == NIX_AF_TL3X_PIR(0)) 1848 return false; 1849 break; 1850 case NIX_TXSCH_LVL_TL4: 1851 if (regbase == NIX_AF_TL4X_CIR(0) || 1852 regbase == NIX_AF_TL4X_PIR(0)) 1853 return false; 1854 break; 1855 } 1856 return true; 1857 } 1858 1859 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 1860 u16 pcifunc, int blkaddr) 1861 { 1862 u32 *pfvf_map; 1863 int schq; 1864 1865 schq = nix_get_tx_link(rvu, pcifunc); 1866 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 1867 /* Skip if PF has already done the config */ 1868 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 1869 return; 1870 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 1871 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 1872 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 1873 TXSCH_TL1_DFLT_RR_QTM); 1874 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 1875 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 1876 } 1877 1878 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 1879 struct nix_txschq_config *req, 1880 struct msg_rsp *rsp) 1881 { 1882 struct rvu_hwinfo *hw = rvu->hw; 1883 u16 pcifunc = req->hdr.pcifunc; 1884 u64 reg, regval, schq_regbase; 1885 struct nix_txsch *txsch; 1886 struct nix_hw *nix_hw; 1887 int blkaddr, idx, err; 1888 int nixlf, schq; 1889 u32 *pfvf_map; 1890 1891 if (req->lvl >= NIX_TXSCH_LVL_CNT || 1892 req->num_regs > MAX_REGS_PER_MBOX_MSG) 1893 return NIX_AF_INVAL_TXSCHQ_CFG; 1894 1895 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1896 if (err) 1897 return err; 1898 1899 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1900 if (!nix_hw) 1901 return -EINVAL; 1902 1903 txsch = &nix_hw->txsch[req->lvl]; 1904 pfvf_map = txsch->pfvf_map; 1905 1906 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 1907 pcifunc & RVU_PFVF_FUNC_MASK) { 1908 mutex_lock(&rvu->rsrc_lock); 1909 if (req->lvl == NIX_TXSCH_LVL_TL1) 1910 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 1911 mutex_unlock(&rvu->rsrc_lock); 1912 return 0; 1913 } 1914 1915 for (idx = 0; idx < req->num_regs; idx++) { 1916 reg = req->reg[idx]; 1917 regval = req->regval[idx]; 1918 schq_regbase = reg & 0xFFFF; 1919 1920 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 1921 txsch->lvl, reg, regval)) 1922 return NIX_AF_INVAL_TXSCHQ_CFG; 1923 1924 /* Check if shaping and coloring is supported */ 1925 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 1926 continue; 1927 1928 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 1929 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 1930 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 1931 pcifunc, 0); 1932 regval &= ~(0x7FULL << 24); 1933 regval |= ((u64)nixlf << 24); 1934 } 1935 1936 /* Clear 'BP_ENA' config, if it's not allowed */ 1937 if (!hw->cap.nix_tx_link_bp) { 1938 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 1939 (schq_regbase & 0xFF00) == 1940 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 1941 regval &= ~BIT_ULL(13); 1942 } 1943 1944 /* Mark config as done for TL1 by PF */ 1945 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 1946 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 1947 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1948 mutex_lock(&rvu->rsrc_lock); 1949 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 1950 NIX_TXSCHQ_CFG_DONE); 1951 mutex_unlock(&rvu->rsrc_lock); 1952 } 1953 1954 /* SMQ flush is special hence split register writes such 1955 * that flush first and write rest of the bits later. 1956 */ 1957 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 1958 (regval & BIT_ULL(49))) { 1959 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1960 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1961 regval &= ~BIT_ULL(49); 1962 } 1963 rvu_write64(rvu, blkaddr, reg, regval); 1964 } 1965 1966 return 0; 1967 } 1968 1969 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 1970 struct nix_vtag_config *req) 1971 { 1972 u64 regval = req->vtag_size; 1973 1974 if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) 1975 return -EINVAL; 1976 1977 if (req->rx.capture_vtag) 1978 regval |= BIT_ULL(5); 1979 if (req->rx.strip_vtag) 1980 regval |= BIT_ULL(4); 1981 1982 rvu_write64(rvu, blkaddr, 1983 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 1984 return 0; 1985 } 1986 1987 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 1988 struct nix_vtag_config *req, 1989 struct msg_rsp *rsp) 1990 { 1991 u16 pcifunc = req->hdr.pcifunc; 1992 int blkaddr, nixlf, err; 1993 1994 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1995 if (err) 1996 return err; 1997 1998 if (req->cfg_type) { 1999 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2000 if (err) 2001 return NIX_AF_ERR_PARAM; 2002 } else { 2003 /* TODO: handle tx vtag configuration */ 2004 return 0; 2005 } 2006 2007 return 0; 2008 } 2009 2010 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2011 int mce, u8 op, u16 pcifunc, int next, bool eol) 2012 { 2013 struct nix_aq_enq_req aq_req; 2014 int err; 2015 2016 aq_req.hdr.pcifunc = 0; 2017 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2018 aq_req.op = op; 2019 aq_req.qidx = mce; 2020 2021 /* Forward bcast pkts to RQ0, RSS not needed */ 2022 aq_req.mce.op = 0; 2023 aq_req.mce.index = 0; 2024 aq_req.mce.eol = eol; 2025 aq_req.mce.pf_func = pcifunc; 2026 aq_req.mce.next = next; 2027 2028 /* All fields valid */ 2029 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2030 2031 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2032 if (err) { 2033 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2034 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2035 return err; 2036 } 2037 return 0; 2038 } 2039 2040 static int nix_update_mce_list(struct nix_mce_list *mce_list, 2041 u16 pcifunc, bool add) 2042 { 2043 struct mce *mce, *tail = NULL; 2044 bool delete = false; 2045 2046 /* Scan through the current list */ 2047 hlist_for_each_entry(mce, &mce_list->head, node) { 2048 /* If already exists, then delete */ 2049 if (mce->pcifunc == pcifunc && !add) { 2050 delete = true; 2051 break; 2052 } 2053 tail = mce; 2054 } 2055 2056 if (delete) { 2057 hlist_del(&mce->node); 2058 kfree(mce); 2059 mce_list->count--; 2060 return 0; 2061 } 2062 2063 if (!add) 2064 return 0; 2065 2066 /* Add a new one to the list, at the tail */ 2067 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2068 if (!mce) 2069 return -ENOMEM; 2070 mce->pcifunc = pcifunc; 2071 if (!tail) 2072 hlist_add_head(&mce->node, &mce_list->head); 2073 else 2074 hlist_add_behind(&mce->node, &tail->node); 2075 mce_list->count++; 2076 return 0; 2077 } 2078 2079 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) 2080 { 2081 int err = 0, idx, next_idx, last_idx; 2082 struct nix_mce_list *mce_list; 2083 struct nix_mcast *mcast; 2084 struct nix_hw *nix_hw; 2085 struct rvu_pfvf *pfvf; 2086 struct mce *mce; 2087 int blkaddr; 2088 2089 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ 2090 if (is_afvf(pcifunc)) 2091 return 0; 2092 2093 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2094 if (blkaddr < 0) 2095 return 0; 2096 2097 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2098 if (!nix_hw) 2099 return 0; 2100 2101 mcast = &nix_hw->mcast; 2102 2103 /* Get this PF/VF func's MCE index */ 2104 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2105 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2106 2107 mce_list = &pfvf->bcast_mce_list; 2108 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { 2109 dev_err(rvu->dev, 2110 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2111 __func__, idx, mce_list->max, 2112 pcifunc >> RVU_PFVF_PF_SHIFT); 2113 return -EINVAL; 2114 } 2115 2116 mutex_lock(&mcast->mce_lock); 2117 2118 err = nix_update_mce_list(mce_list, pcifunc, add); 2119 if (err) 2120 goto end; 2121 2122 /* Disable MCAM entry in NPC */ 2123 if (!mce_list->count) { 2124 rvu_npc_enable_bcast_entry(rvu, pcifunc, false); 2125 goto end; 2126 } 2127 2128 /* Dump the updated list to HW */ 2129 idx = pfvf->bcast_mce_idx; 2130 last_idx = idx + mce_list->count - 1; 2131 hlist_for_each_entry(mce, &mce_list->head, node) { 2132 if (idx > last_idx) 2133 break; 2134 2135 next_idx = idx + 1; 2136 /* EOL should be set in last MCE */ 2137 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2138 mce->pcifunc, next_idx, 2139 (next_idx > last_idx) ? true : false); 2140 if (err) 2141 goto end; 2142 idx++; 2143 } 2144 2145 end: 2146 mutex_unlock(&mcast->mce_lock); 2147 return err; 2148 } 2149 2150 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2151 { 2152 struct nix_mcast *mcast = &nix_hw->mcast; 2153 int err, pf, numvfs, idx; 2154 struct rvu_pfvf *pfvf; 2155 u16 pcifunc; 2156 u64 cfg; 2157 2158 /* Skip PF0 (i.e AF) */ 2159 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2160 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2161 /* If PF is not enabled, nothing to do */ 2162 if (!((cfg >> 20) & 0x01)) 2163 continue; 2164 /* Get numVFs attached to this PF */ 2165 numvfs = (cfg >> 12) & 0xFF; 2166 2167 pfvf = &rvu->pf[pf]; 2168 2169 /* This NIX0/1 block mapped to PF ? */ 2170 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2171 continue; 2172 2173 /* Save the start MCE */ 2174 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2175 2176 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2177 2178 for (idx = 0; idx < (numvfs + 1); idx++) { 2179 /* idx-0 is for PF, followed by VFs */ 2180 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2181 pcifunc |= idx; 2182 /* Add dummy entries now, so that we don't have to check 2183 * for whether AQ_OP should be INIT/WRITE later on. 2184 * Will be updated when a NIXLF is attached/detached to 2185 * these PF/VFs. 2186 */ 2187 err = nix_blk_setup_mce(rvu, nix_hw, 2188 pfvf->bcast_mce_idx + idx, 2189 NIX_AQ_INSTOP_INIT, 2190 pcifunc, 0, true); 2191 if (err) 2192 return err; 2193 } 2194 } 2195 return 0; 2196 } 2197 2198 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2199 { 2200 struct nix_mcast *mcast = &nix_hw->mcast; 2201 struct rvu_hwinfo *hw = rvu->hw; 2202 int err, size; 2203 2204 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2205 size = (1ULL << size); 2206 2207 /* Alloc memory for multicast/mirror replication entries */ 2208 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2209 (256UL << MC_TBL_SIZE), size); 2210 if (err) 2211 return -ENOMEM; 2212 2213 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2214 (u64)mcast->mce_ctx->iova); 2215 2216 /* Set max list length equal to max no of VFs per PF + PF itself */ 2217 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2218 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2219 2220 /* Alloc memory for multicast replication buffers */ 2221 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2222 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2223 (8UL << MC_BUF_CNT), size); 2224 if (err) 2225 return -ENOMEM; 2226 2227 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2228 (u64)mcast->mcast_buf->iova); 2229 2230 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2231 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2232 2233 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2234 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2235 BIT_ULL(20) | MC_BUF_CNT); 2236 2237 mutex_init(&mcast->mce_lock); 2238 2239 return nix_setup_bcast_tables(rvu, nix_hw); 2240 } 2241 2242 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2243 { 2244 struct nix_txsch *txsch; 2245 int err, lvl, schq; 2246 u64 cfg, reg; 2247 2248 /* Get scheduler queue count of each type and alloc 2249 * bitmap for each for alloc/free/attach operations. 2250 */ 2251 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2252 txsch = &nix_hw->txsch[lvl]; 2253 txsch->lvl = lvl; 2254 switch (lvl) { 2255 case NIX_TXSCH_LVL_SMQ: 2256 reg = NIX_AF_MDQ_CONST; 2257 break; 2258 case NIX_TXSCH_LVL_TL4: 2259 reg = NIX_AF_TL4_CONST; 2260 break; 2261 case NIX_TXSCH_LVL_TL3: 2262 reg = NIX_AF_TL3_CONST; 2263 break; 2264 case NIX_TXSCH_LVL_TL2: 2265 reg = NIX_AF_TL2_CONST; 2266 break; 2267 case NIX_TXSCH_LVL_TL1: 2268 reg = NIX_AF_TL1_CONST; 2269 break; 2270 } 2271 cfg = rvu_read64(rvu, blkaddr, reg); 2272 txsch->schq.max = cfg & 0xFFFF; 2273 err = rvu_alloc_bitmap(&txsch->schq); 2274 if (err) 2275 return err; 2276 2277 /* Allocate memory for scheduler queues to 2278 * PF/VF pcifunc mapping info. 2279 */ 2280 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 2281 sizeof(u32), GFP_KERNEL); 2282 if (!txsch->pfvf_map) 2283 return -ENOMEM; 2284 for (schq = 0; schq < txsch->schq.max; schq++) 2285 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2286 } 2287 return 0; 2288 } 2289 2290 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 2291 int blkaddr, u32 cfg) 2292 { 2293 int fmt_idx; 2294 2295 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 2296 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 2297 return fmt_idx; 2298 } 2299 if (fmt_idx >= nix_hw->mark_format.total) 2300 return -ERANGE; 2301 2302 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 2303 nix_hw->mark_format.cfg[fmt_idx] = cfg; 2304 nix_hw->mark_format.in_use++; 2305 return fmt_idx; 2306 } 2307 2308 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 2309 int blkaddr) 2310 { 2311 u64 cfgs[] = { 2312 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 2313 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 2314 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 2315 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 2316 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 2317 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 2318 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 2319 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 2320 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 2321 }; 2322 int i, rc; 2323 u64 total; 2324 2325 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 2326 nix_hw->mark_format.total = (u8)total; 2327 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 2328 GFP_KERNEL); 2329 if (!nix_hw->mark_format.cfg) 2330 return -ENOMEM; 2331 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 2332 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 2333 if (rc < 0) 2334 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 2335 i, rc); 2336 } 2337 2338 return 0; 2339 } 2340 2341 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 2342 struct msg_rsp *rsp) 2343 { 2344 u16 pcifunc = req->hdr.pcifunc; 2345 int i, nixlf, blkaddr, err; 2346 u64 stats; 2347 2348 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2349 if (err) 2350 return err; 2351 2352 /* Get stats count supported by HW */ 2353 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 2354 2355 /* Reset tx stats */ 2356 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 2357 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 2358 2359 /* Reset rx stats */ 2360 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 2361 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 2362 2363 return 0; 2364 } 2365 2366 /* Returns the ALG index to be set into NPC_RX_ACTION */ 2367 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 2368 { 2369 int i; 2370 2371 /* Scan over exiting algo entries to find a match */ 2372 for (i = 0; i < nix_hw->flowkey.in_use; i++) 2373 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 2374 return i; 2375 2376 return -ERANGE; 2377 } 2378 2379 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 2380 { 2381 int idx, nr_field, key_off, field_marker, keyoff_marker; 2382 int max_key_off, max_bit_pos, group_member; 2383 struct nix_rx_flowkey_alg *field; 2384 struct nix_rx_flowkey_alg tmp; 2385 u32 key_type, valid_key; 2386 2387 if (!alg) 2388 return -EINVAL; 2389 2390 #define FIELDS_PER_ALG 5 2391 #define MAX_KEY_OFF 40 2392 /* Clear all fields */ 2393 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 2394 2395 /* Each of the 32 possible flow key algorithm definitions should 2396 * fall into above incremental config (except ALG0). Otherwise a 2397 * single NPC MCAM entry is not sufficient for supporting RSS. 2398 * 2399 * If a different definition or combination needed then NPC MCAM 2400 * has to be programmed to filter such pkts and it's action should 2401 * point to this definition to calculate flowtag or hash. 2402 * 2403 * The `for loop` goes over _all_ protocol field and the following 2404 * variables depicts the state machine forward progress logic. 2405 * 2406 * keyoff_marker - Enabled when hash byte length needs to be accounted 2407 * in field->key_offset update. 2408 * field_marker - Enabled when a new field needs to be selected. 2409 * group_member - Enabled when protocol is part of a group. 2410 */ 2411 2412 keyoff_marker = 0; max_key_off = 0; group_member = 0; 2413 nr_field = 0; key_off = 0; field_marker = 1; 2414 field = &tmp; max_bit_pos = fls(flow_cfg); 2415 for (idx = 0; 2416 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 2417 key_off < MAX_KEY_OFF; idx++) { 2418 key_type = BIT(idx); 2419 valid_key = flow_cfg & key_type; 2420 /* Found a field marker, reset the field values */ 2421 if (field_marker) 2422 memset(&tmp, 0, sizeof(tmp)); 2423 2424 field_marker = true; 2425 keyoff_marker = true; 2426 switch (key_type) { 2427 case NIX_FLOW_KEY_TYPE_PORT: 2428 field->sel_chan = true; 2429 /* This should be set to 1, when SEL_CHAN is set */ 2430 field->bytesm1 = 1; 2431 break; 2432 case NIX_FLOW_KEY_TYPE_IPV4: 2433 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 2434 field->lid = NPC_LID_LC; 2435 field->ltype_match = NPC_LT_LC_IP; 2436 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 2437 field->lid = NPC_LID_LG; 2438 field->ltype_match = NPC_LT_LG_TU_IP; 2439 } 2440 field->hdr_offset = 12; /* SIP offset */ 2441 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 2442 field->ltype_mask = 0xF; /* Match only IPv4 */ 2443 keyoff_marker = false; 2444 break; 2445 case NIX_FLOW_KEY_TYPE_IPV6: 2446 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 2447 field->lid = NPC_LID_LC; 2448 field->ltype_match = NPC_LT_LC_IP6; 2449 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 2450 field->lid = NPC_LID_LG; 2451 field->ltype_match = NPC_LT_LG_TU_IP6; 2452 } 2453 field->hdr_offset = 8; /* SIP offset */ 2454 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 2455 field->ltype_mask = 0xF; /* Match only IPv6 */ 2456 break; 2457 case NIX_FLOW_KEY_TYPE_TCP: 2458 case NIX_FLOW_KEY_TYPE_UDP: 2459 case NIX_FLOW_KEY_TYPE_SCTP: 2460 case NIX_FLOW_KEY_TYPE_INNR_TCP: 2461 case NIX_FLOW_KEY_TYPE_INNR_UDP: 2462 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 2463 field->lid = NPC_LID_LD; 2464 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 2465 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 2466 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 2467 field->lid = NPC_LID_LH; 2468 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 2469 2470 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 2471 * so no need to change the ltype_match, just change 2472 * the lid for inner protocols 2473 */ 2474 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 2475 (int)NPC_LT_LH_TU_TCP); 2476 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 2477 (int)NPC_LT_LH_TU_UDP); 2478 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 2479 (int)NPC_LT_LH_TU_SCTP); 2480 2481 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 2482 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 2483 valid_key) { 2484 field->ltype_match |= NPC_LT_LD_TCP; 2485 group_member = true; 2486 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 2487 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 2488 valid_key) { 2489 field->ltype_match |= NPC_LT_LD_UDP; 2490 group_member = true; 2491 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 2492 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 2493 valid_key) { 2494 field->ltype_match |= NPC_LT_LD_SCTP; 2495 group_member = true; 2496 } 2497 field->ltype_mask = ~field->ltype_match; 2498 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 2499 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 2500 /* Handle the case where any of the group item 2501 * is enabled in the group but not the final one 2502 */ 2503 if (group_member) { 2504 valid_key = true; 2505 group_member = false; 2506 } 2507 } else { 2508 field_marker = false; 2509 keyoff_marker = false; 2510 } 2511 break; 2512 case NIX_FLOW_KEY_TYPE_NVGRE: 2513 field->lid = NPC_LID_LD; 2514 field->hdr_offset = 4; /* VSID offset */ 2515 field->bytesm1 = 2; 2516 field->ltype_match = NPC_LT_LD_NVGRE; 2517 field->ltype_mask = 0xF; 2518 break; 2519 case NIX_FLOW_KEY_TYPE_VXLAN: 2520 case NIX_FLOW_KEY_TYPE_GENEVE: 2521 field->lid = NPC_LID_LE; 2522 field->bytesm1 = 2; 2523 field->hdr_offset = 4; 2524 field->ltype_mask = 0xF; 2525 field_marker = false; 2526 keyoff_marker = false; 2527 2528 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 2529 field->ltype_match |= NPC_LT_LE_VXLAN; 2530 group_member = true; 2531 } 2532 2533 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 2534 field->ltype_match |= NPC_LT_LE_GENEVE; 2535 group_member = true; 2536 } 2537 2538 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 2539 if (group_member) { 2540 field->ltype_mask = ~field->ltype_match; 2541 field_marker = true; 2542 keyoff_marker = true; 2543 valid_key = true; 2544 group_member = false; 2545 } 2546 } 2547 break; 2548 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 2549 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 2550 field->lid = NPC_LID_LA; 2551 field->ltype_match = NPC_LT_LA_ETHER; 2552 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 2553 field->lid = NPC_LID_LF; 2554 field->ltype_match = NPC_LT_LF_TU_ETHER; 2555 } 2556 field->hdr_offset = 0; 2557 field->bytesm1 = 5; /* DMAC 6 Byte */ 2558 field->ltype_mask = 0xF; 2559 break; 2560 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 2561 field->lid = NPC_LID_LC; 2562 field->hdr_offset = 40; /* IPV6 hdr */ 2563 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 2564 field->ltype_match = NPC_LT_LC_IP6_EXT; 2565 field->ltype_mask = 0xF; 2566 break; 2567 case NIX_FLOW_KEY_TYPE_GTPU: 2568 field->lid = NPC_LID_LE; 2569 field->hdr_offset = 4; 2570 field->bytesm1 = 3; /* 4 bytes TID*/ 2571 field->ltype_match = NPC_LT_LE_GTPU; 2572 field->ltype_mask = 0xF; 2573 break; 2574 case NIX_FLOW_KEY_TYPE_VLAN: 2575 field->lid = NPC_LID_LB; 2576 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 2577 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 2578 field->ltype_match = NPC_LT_LB_CTAG; 2579 field->ltype_mask = 0xF; 2580 field->fn_mask = 1; /* Mask out the first nibble */ 2581 break; 2582 } 2583 field->ena = 1; 2584 2585 /* Found a valid flow key type */ 2586 if (valid_key) { 2587 field->key_offset = key_off; 2588 memcpy(&alg[nr_field], field, sizeof(*field)); 2589 max_key_off = max(max_key_off, field->bytesm1 + 1); 2590 2591 /* Found a field marker, get the next field */ 2592 if (field_marker) 2593 nr_field++; 2594 } 2595 2596 /* Found a keyoff marker, update the new key_off */ 2597 if (keyoff_marker) { 2598 key_off += max_key_off; 2599 max_key_off = 0; 2600 } 2601 } 2602 /* Processed all the flow key types */ 2603 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 2604 return 0; 2605 else 2606 return NIX_AF_ERR_RSS_NOSPC_FIELD; 2607 } 2608 2609 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 2610 { 2611 u64 field[FIELDS_PER_ALG]; 2612 struct nix_hw *hw; 2613 int fid, rc; 2614 2615 hw = get_nix_hw(rvu->hw, blkaddr); 2616 if (!hw) 2617 return -EINVAL; 2618 2619 /* No room to add new flow hash algoritham */ 2620 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 2621 return NIX_AF_ERR_RSS_NOSPC_ALGO; 2622 2623 /* Generate algo fields for the given flow_cfg */ 2624 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 2625 if (rc) 2626 return rc; 2627 2628 /* Update ALGX_FIELDX register with generated fields */ 2629 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2630 rvu_write64(rvu, blkaddr, 2631 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 2632 fid), field[fid]); 2633 2634 /* Store the flow_cfg for futher lookup */ 2635 rc = hw->flowkey.in_use; 2636 hw->flowkey.flowkey[rc] = flow_cfg; 2637 hw->flowkey.in_use++; 2638 2639 return rc; 2640 } 2641 2642 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 2643 struct nix_rss_flowkey_cfg *req, 2644 struct nix_rss_flowkey_cfg_rsp *rsp) 2645 { 2646 u16 pcifunc = req->hdr.pcifunc; 2647 int alg_idx, nixlf, blkaddr; 2648 struct nix_hw *nix_hw; 2649 int err; 2650 2651 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2652 if (err) 2653 return err; 2654 2655 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2656 if (!nix_hw) 2657 return -EINVAL; 2658 2659 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 2660 /* Failed to get algo index from the exiting list, reserve new */ 2661 if (alg_idx < 0) { 2662 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 2663 req->flowkey_cfg); 2664 if (alg_idx < 0) 2665 return alg_idx; 2666 } 2667 rsp->alg_idx = alg_idx; 2668 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 2669 alg_idx, req->mcam_index); 2670 return 0; 2671 } 2672 2673 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 2674 { 2675 u32 flowkey_cfg, minkey_cfg; 2676 int alg, fid, rc; 2677 2678 /* Disable all flow key algx fieldx */ 2679 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 2680 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2681 rvu_write64(rvu, blkaddr, 2682 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 2683 0); 2684 } 2685 2686 /* IPv4/IPv6 SIP/DIPs */ 2687 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 2688 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2689 if (rc < 0) 2690 return rc; 2691 2692 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2693 minkey_cfg = flowkey_cfg; 2694 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 2695 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2696 if (rc < 0) 2697 return rc; 2698 2699 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2700 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 2701 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2702 if (rc < 0) 2703 return rc; 2704 2705 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2706 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 2707 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2708 if (rc < 0) 2709 return rc; 2710 2711 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2712 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2713 NIX_FLOW_KEY_TYPE_UDP; 2714 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2715 if (rc < 0) 2716 return rc; 2717 2718 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2719 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2720 NIX_FLOW_KEY_TYPE_SCTP; 2721 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2722 if (rc < 0) 2723 return rc; 2724 2725 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2726 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 2727 NIX_FLOW_KEY_TYPE_SCTP; 2728 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2729 if (rc < 0) 2730 return rc; 2731 2732 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2733 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2734 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 2735 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2736 if (rc < 0) 2737 return rc; 2738 2739 return 0; 2740 } 2741 2742 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 2743 struct nix_set_mac_addr *req, 2744 struct msg_rsp *rsp) 2745 { 2746 u16 pcifunc = req->hdr.pcifunc; 2747 int blkaddr, nixlf, err; 2748 struct rvu_pfvf *pfvf; 2749 2750 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2751 if (err) 2752 return err; 2753 2754 pfvf = rvu_get_pfvf(rvu, pcifunc); 2755 2756 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 2757 2758 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 2759 pfvf->rx_chan_base, req->mac_addr); 2760 2761 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); 2762 2763 return 0; 2764 } 2765 2766 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 2767 struct msg_req *req, 2768 struct nix_get_mac_addr_rsp *rsp) 2769 { 2770 u16 pcifunc = req->hdr.pcifunc; 2771 struct rvu_pfvf *pfvf; 2772 2773 if (!is_nixlf_attached(rvu, pcifunc)) 2774 return NIX_AF_ERR_AF_LF_INVALID; 2775 2776 pfvf = rvu_get_pfvf(rvu, pcifunc); 2777 2778 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 2779 2780 return 0; 2781 } 2782 2783 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 2784 struct msg_rsp *rsp) 2785 { 2786 bool allmulti = false, disable_promisc = false; 2787 u16 pcifunc = req->hdr.pcifunc; 2788 int blkaddr, nixlf, err; 2789 struct rvu_pfvf *pfvf; 2790 2791 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2792 if (err) 2793 return err; 2794 2795 pfvf = rvu_get_pfvf(rvu, pcifunc); 2796 2797 if (req->mode & NIX_RX_MODE_PROMISC) 2798 allmulti = false; 2799 else if (req->mode & NIX_RX_MODE_ALLMULTI) 2800 allmulti = true; 2801 else 2802 disable_promisc = true; 2803 2804 if (disable_promisc) 2805 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); 2806 else 2807 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 2808 pfvf->rx_chan_base, allmulti); 2809 2810 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); 2811 2812 return 0; 2813 } 2814 2815 static void nix_find_link_frs(struct rvu *rvu, 2816 struct nix_frs_cfg *req, u16 pcifunc) 2817 { 2818 int pf = rvu_get_pf(pcifunc); 2819 struct rvu_pfvf *pfvf; 2820 int maxlen, minlen; 2821 int numvfs, hwvf; 2822 int vf; 2823 2824 /* Update with requester's min/max lengths */ 2825 pfvf = rvu_get_pfvf(rvu, pcifunc); 2826 pfvf->maxlen = req->maxlen; 2827 if (req->update_minlen) 2828 pfvf->minlen = req->minlen; 2829 2830 maxlen = req->maxlen; 2831 minlen = req->update_minlen ? req->minlen : 0; 2832 2833 /* Get this PF's numVFs and starting hwvf */ 2834 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 2835 2836 /* For each VF, compare requested max/minlen */ 2837 for (vf = 0; vf < numvfs; vf++) { 2838 pfvf = &rvu->hwvf[hwvf + vf]; 2839 if (pfvf->maxlen > maxlen) 2840 maxlen = pfvf->maxlen; 2841 if (req->update_minlen && 2842 pfvf->minlen && pfvf->minlen < minlen) 2843 minlen = pfvf->minlen; 2844 } 2845 2846 /* Compare requested max/minlen with PF's max/minlen */ 2847 pfvf = &rvu->pf[pf]; 2848 if (pfvf->maxlen > maxlen) 2849 maxlen = pfvf->maxlen; 2850 if (req->update_minlen && 2851 pfvf->minlen && pfvf->minlen < minlen) 2852 minlen = pfvf->minlen; 2853 2854 /* Update the request with max/min PF's and it's VF's max/min */ 2855 req->maxlen = maxlen; 2856 if (req->update_minlen) 2857 req->minlen = minlen; 2858 } 2859 2860 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 2861 struct msg_rsp *rsp) 2862 { 2863 struct rvu_hwinfo *hw = rvu->hw; 2864 u16 pcifunc = req->hdr.pcifunc; 2865 int pf = rvu_get_pf(pcifunc); 2866 int blkaddr, schq, link = -1; 2867 struct nix_txsch *txsch; 2868 u64 cfg, lmac_fifo_len; 2869 struct nix_hw *nix_hw; 2870 u8 cgx = 0, lmac = 0; 2871 2872 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2873 if (blkaddr < 0) 2874 return NIX_AF_ERR_AF_LF_INVALID; 2875 2876 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2877 if (!nix_hw) 2878 return -EINVAL; 2879 2880 if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) 2881 return NIX_AF_ERR_FRS_INVALID; 2882 2883 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 2884 return NIX_AF_ERR_FRS_INVALID; 2885 2886 /* Check if requester wants to update SMQ's */ 2887 if (!req->update_smq) 2888 goto rx_frscfg; 2889 2890 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 2891 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 2892 mutex_lock(&rvu->rsrc_lock); 2893 for (schq = 0; schq < txsch->schq.max; schq++) { 2894 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 2895 continue; 2896 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 2897 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 2898 if (req->update_minlen) 2899 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 2900 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 2901 } 2902 mutex_unlock(&rvu->rsrc_lock); 2903 2904 rx_frscfg: 2905 /* Check if config is for SDP link */ 2906 if (req->sdp_link) { 2907 if (!hw->sdp_links) 2908 return NIX_AF_ERR_RX_LINK_INVALID; 2909 link = hw->cgx_links + hw->lbk_links; 2910 goto linkcfg; 2911 } 2912 2913 /* Check if the request is from CGX mapped RVU PF */ 2914 if (is_pf_cgxmapped(rvu, pf)) { 2915 /* Get CGX and LMAC to which this PF is mapped and find link */ 2916 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 2917 link = (cgx * hw->lmac_per_cgx) + lmac; 2918 } else if (pf == 0) { 2919 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 2920 link = hw->cgx_links; 2921 } 2922 2923 if (link < 0) 2924 return NIX_AF_ERR_RX_LINK_INVALID; 2925 2926 nix_find_link_frs(rvu, req, pcifunc); 2927 2928 linkcfg: 2929 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 2930 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 2931 if (req->update_minlen) 2932 cfg = (cfg & ~0xFFFFULL) | req->minlen; 2933 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 2934 2935 if (req->sdp_link || pf == 0) 2936 return 0; 2937 2938 /* Update transmit credits for CGX links */ 2939 lmac_fifo_len = 2940 CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 2941 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 2942 cfg &= ~(0xFFFFFULL << 12); 2943 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; 2944 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 2945 return 0; 2946 } 2947 2948 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, 2949 struct msg_rsp *rsp) 2950 { 2951 struct npc_mcam_alloc_entry_req alloc_req = { }; 2952 struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; 2953 struct npc_mcam_free_entry_req free_req = { }; 2954 u16 pcifunc = req->hdr.pcifunc; 2955 int blkaddr, nixlf, err; 2956 struct rvu_pfvf *pfvf; 2957 2958 /* LBK VFs do not have separate MCAM UCAST entry hence 2959 * skip allocating rxvlan for them 2960 */ 2961 if (is_afvf(pcifunc)) 2962 return 0; 2963 2964 pfvf = rvu_get_pfvf(rvu, pcifunc); 2965 if (pfvf->rxvlan) 2966 return 0; 2967 2968 /* alloc new mcam entry */ 2969 alloc_req.hdr.pcifunc = pcifunc; 2970 alloc_req.count = 1; 2971 2972 err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, 2973 &alloc_rsp); 2974 if (err) 2975 return err; 2976 2977 /* update entry to enable rxvlan offload */ 2978 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2979 if (blkaddr < 0) { 2980 err = NIX_AF_ERR_AF_LF_INVALID; 2981 goto free_entry; 2982 } 2983 2984 nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); 2985 if (nixlf < 0) { 2986 err = NIX_AF_ERR_AF_LF_INVALID; 2987 goto free_entry; 2988 } 2989 2990 pfvf->rxvlan_index = alloc_rsp.entry_list[0]; 2991 /* all it means is that rxvlan_index is valid */ 2992 pfvf->rxvlan = true; 2993 2994 err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); 2995 if (err) 2996 goto free_entry; 2997 2998 return 0; 2999 free_entry: 3000 free_req.hdr.pcifunc = pcifunc; 3001 free_req.entry = alloc_rsp.entry_list[0]; 3002 rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); 3003 pfvf->rxvlan = false; 3004 return err; 3005 } 3006 3007 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3008 struct msg_rsp *rsp) 3009 { 3010 int nixlf, blkaddr, err; 3011 u64 cfg; 3012 3013 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3014 if (err) 3015 return err; 3016 3017 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3018 /* Set the interface configuration */ 3019 if (req->len_verify & BIT(0)) 3020 cfg |= BIT_ULL(41); 3021 else 3022 cfg &= ~BIT_ULL(41); 3023 3024 if (req->len_verify & BIT(1)) 3025 cfg |= BIT_ULL(40); 3026 else 3027 cfg &= ~BIT_ULL(40); 3028 3029 if (req->csum_verify & BIT(0)) 3030 cfg |= BIT_ULL(37); 3031 else 3032 cfg &= ~BIT_ULL(37); 3033 3034 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3035 3036 return 0; 3037 } 3038 3039 static void nix_link_config(struct rvu *rvu, int blkaddr) 3040 { 3041 struct rvu_hwinfo *hw = rvu->hw; 3042 int cgx, lmac_cnt, slink, link; 3043 u64 tx_credits; 3044 3045 /* Set default min/max packet lengths allowed on NIX Rx links. 3046 * 3047 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3048 * as undersize and report them to SW as error pkts, hence 3049 * setting it to 40 bytes. 3050 */ 3051 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { 3052 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3053 NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3054 } 3055 3056 if (hw->sdp_links) { 3057 link = hw->cgx_links + hw->lbk_links; 3058 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3059 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3060 } 3061 3062 /* Set credits for Tx links assuming max packet length allowed. 3063 * This will be reconfigured based on MTU set for PF/VF. 3064 */ 3065 for (cgx = 0; cgx < hw->cgx; cgx++) { 3066 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3067 tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; 3068 /* Enable credits and set credit pkt count to max allowed */ 3069 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3070 slink = cgx * hw->lmac_per_cgx; 3071 for (link = slink; link < (slink + lmac_cnt); link++) { 3072 rvu_write64(rvu, blkaddr, 3073 NIX_AF_TX_LINKX_NORM_CREDIT(link), 3074 tx_credits); 3075 } 3076 } 3077 3078 /* Set Tx credits for LBK link */ 3079 slink = hw->cgx_links; 3080 for (link = slink; link < (slink + hw->lbk_links); link++) { 3081 tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ 3082 /* Enable credits and set credit pkt count to max allowed */ 3083 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3084 rvu_write64(rvu, blkaddr, 3085 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 3086 } 3087 } 3088 3089 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 3090 { 3091 int idx, err; 3092 u64 status; 3093 3094 /* Start X2P bus calibration */ 3095 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3096 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 3097 /* Wait for calibration to complete */ 3098 err = rvu_poll_reg(rvu, blkaddr, 3099 NIX_AF_STATUS, BIT_ULL(10), false); 3100 if (err) { 3101 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 3102 return err; 3103 } 3104 3105 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 3106 /* Check if CGX devices are ready */ 3107 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 3108 /* Skip when cgx port is not available */ 3109 if (!rvu_cgx_pdata(idx, rvu) || 3110 (status & (BIT_ULL(16 + idx)))) 3111 continue; 3112 dev_err(rvu->dev, 3113 "CGX%d didn't respond to NIX X2P calibration\n", idx); 3114 err = -EBUSY; 3115 } 3116 3117 /* Check if LBK is ready */ 3118 if (!(status & BIT_ULL(19))) { 3119 dev_err(rvu->dev, 3120 "LBK didn't respond to NIX X2P calibration\n"); 3121 err = -EBUSY; 3122 } 3123 3124 /* Clear 'calibrate_x2p' bit */ 3125 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3126 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 3127 if (err || (status & 0x3FFULL)) 3128 dev_err(rvu->dev, 3129 "NIX X2P calibration failed, status 0x%llx\n", status); 3130 if (err) 3131 return err; 3132 return 0; 3133 } 3134 3135 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 3136 { 3137 u64 cfg; 3138 int err; 3139 3140 /* Set admin queue endianness */ 3141 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 3142 #ifdef __BIG_ENDIAN 3143 cfg |= BIT_ULL(8); 3144 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3145 #else 3146 cfg &= ~BIT_ULL(8); 3147 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3148 #endif 3149 3150 /* Do not bypass NDC cache */ 3151 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 3152 cfg &= ~0x3FFEULL; 3153 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 3154 /* Disable caching of SQB aka SQEs */ 3155 cfg |= 0x04ULL; 3156 #endif 3157 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 3158 3159 /* Result structure can be followed by RQ/SQ/CQ context at 3160 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 3161 * operation type. Alloc sufficient result memory for all operations. 3162 */ 3163 err = rvu_aq_alloc(rvu, &block->aq, 3164 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 3165 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 3166 if (err) 3167 return err; 3168 3169 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 3170 rvu_write64(rvu, block->addr, 3171 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 3172 return 0; 3173 } 3174 3175 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 3176 { 3177 const struct npc_lt_def_cfg *ltdefs; 3178 struct rvu_hwinfo *hw = rvu->hw; 3179 int blkaddr = nix_hw->blkaddr; 3180 struct rvu_block *block; 3181 int err; 3182 u64 cfg; 3183 3184 block = &hw->block[blkaddr]; 3185 3186 if (is_rvu_96xx_B0(rvu)) { 3187 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 3188 * internal state when conditional clocks are turned off. 3189 * Hence enable them. 3190 */ 3191 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3192 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 3193 3194 /* Set chan/link to backpressure TL3 instead of TL2 */ 3195 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 3196 3197 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 3198 * This sticky mode is known to cause SQ stalls when multiple 3199 * SQs are mapped to same SMQ and transmitting pkts at a time. 3200 */ 3201 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 3202 cfg &= ~BIT_ULL(15); 3203 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 3204 } 3205 3206 ltdefs = rvu->kpu.lt_def; 3207 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 3208 err = nix_calibrate_x2p(rvu, blkaddr); 3209 if (err) 3210 return err; 3211 3212 /* Set num of links of each type */ 3213 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 3214 hw->cgx = (cfg >> 12) & 0xF; 3215 hw->lmac_per_cgx = (cfg >> 8) & 0xF; 3216 hw->cgx_links = hw->cgx * hw->lmac_per_cgx; 3217 hw->lbk_links = (cfg >> 24) & 0xF; 3218 hw->sdp_links = 1; 3219 3220 /* Initialize admin queue */ 3221 err = nix_aq_init(rvu, block); 3222 if (err) 3223 return err; 3224 3225 /* Restore CINT timer delay to HW reset values */ 3226 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 3227 3228 if (is_block_implemented(hw, blkaddr)) { 3229 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 3230 if (err) 3231 return err; 3232 3233 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 3234 if (err) 3235 return err; 3236 3237 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 3238 if (err) 3239 return err; 3240 3241 /* Configure segmentation offload formats */ 3242 nix_setup_lso(rvu, nix_hw, blkaddr); 3243 3244 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 3245 * This helps HW protocol checker to identify headers 3246 * and validate length and checksums. 3247 */ 3248 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 3249 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 3250 ltdefs->rx_ol2.ltype_mask); 3251 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 3252 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 3253 ltdefs->rx_oip4.ltype_mask); 3254 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 3255 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 3256 ltdefs->rx_iip4.ltype_mask); 3257 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 3258 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 3259 ltdefs->rx_oip6.ltype_mask); 3260 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 3261 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 3262 ltdefs->rx_iip6.ltype_mask); 3263 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 3264 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 3265 ltdefs->rx_otcp.ltype_mask); 3266 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 3267 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 3268 ltdefs->rx_itcp.ltype_mask); 3269 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 3270 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 3271 ltdefs->rx_oudp.ltype_mask); 3272 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 3273 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 3274 ltdefs->rx_iudp.ltype_mask); 3275 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 3276 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 3277 ltdefs->rx_osctp.ltype_mask); 3278 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 3279 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 3280 ltdefs->rx_isctp.ltype_mask); 3281 3282 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 3283 if (err) 3284 return err; 3285 3286 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 3287 nix_link_config(rvu, blkaddr); 3288 3289 /* Enable Channel backpressure */ 3290 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 3291 } 3292 return 0; 3293 } 3294 3295 int rvu_nix_init(struct rvu *rvu) 3296 { 3297 struct rvu_hwinfo *hw = rvu->hw; 3298 struct nix_hw *nix_hw; 3299 int blkaddr = 0, err; 3300 int i = 0; 3301 3302 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 3303 GFP_KERNEL); 3304 if (!hw->nix) 3305 return -ENOMEM; 3306 3307 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3308 while (blkaddr) { 3309 nix_hw = &hw->nix[i]; 3310 nix_hw->rvu = rvu; 3311 nix_hw->blkaddr = blkaddr; 3312 err = rvu_nix_block_init(rvu, nix_hw); 3313 if (err) 3314 return err; 3315 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3316 i++; 3317 } 3318 3319 return 0; 3320 } 3321 3322 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 3323 struct rvu_block *block) 3324 { 3325 struct nix_txsch *txsch; 3326 struct nix_mcast *mcast; 3327 struct nix_hw *nix_hw; 3328 int lvl; 3329 3330 rvu_aq_free(rvu, block->aq); 3331 3332 if (is_block_implemented(rvu->hw, blkaddr)) { 3333 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3334 if (!nix_hw) 3335 return; 3336 3337 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3338 txsch = &nix_hw->txsch[lvl]; 3339 kfree(txsch->schq.bmap); 3340 } 3341 3342 mcast = &nix_hw->mcast; 3343 qmem_free(rvu->dev, mcast->mce_ctx); 3344 qmem_free(rvu->dev, mcast->mcast_buf); 3345 mutex_destroy(&mcast->mce_lock); 3346 } 3347 } 3348 3349 void rvu_nix_freemem(struct rvu *rvu) 3350 { 3351 struct rvu_hwinfo *hw = rvu->hw; 3352 struct rvu_block *block; 3353 int blkaddr = 0; 3354 3355 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3356 while (blkaddr) { 3357 block = &hw->block[blkaddr]; 3358 rvu_nix_block_freemem(rvu, blkaddr, block); 3359 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3360 } 3361 } 3362 3363 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 3364 struct msg_rsp *rsp) 3365 { 3366 u16 pcifunc = req->hdr.pcifunc; 3367 int nixlf, err; 3368 3369 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3370 if (err) 3371 return err; 3372 3373 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 3374 3375 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 3376 } 3377 3378 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 3379 struct msg_rsp *rsp) 3380 { 3381 u16 pcifunc = req->hdr.pcifunc; 3382 int nixlf, err; 3383 3384 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3385 if (err) 3386 return err; 3387 3388 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 3389 3390 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 3391 } 3392 3393 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 3394 { 3395 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3396 struct hwctx_disable_req ctx_req; 3397 int err; 3398 3399 ctx_req.hdr.pcifunc = pcifunc; 3400 3401 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 3402 nix_interface_deinit(rvu, pcifunc, nixlf); 3403 nix_rx_sync(rvu, blkaddr); 3404 nix_txschq_free(rvu, pcifunc); 3405 3406 rvu_cgx_start_stop_io(rvu, pcifunc, false); 3407 3408 if (pfvf->sq_ctx) { 3409 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 3410 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3411 if (err) 3412 dev_err(rvu->dev, "SQ ctx disable failed\n"); 3413 } 3414 3415 if (pfvf->rq_ctx) { 3416 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 3417 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3418 if (err) 3419 dev_err(rvu->dev, "RQ ctx disable failed\n"); 3420 } 3421 3422 if (pfvf->cq_ctx) { 3423 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 3424 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3425 if (err) 3426 dev_err(rvu->dev, "CQ ctx disable failed\n"); 3427 } 3428 3429 nix_ctx_free(rvu, pfvf); 3430 } 3431 3432 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 3433 3434 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 3435 { 3436 struct rvu_hwinfo *hw = rvu->hw; 3437 struct rvu_block *block; 3438 int blkaddr; 3439 int nixlf; 3440 u64 cfg; 3441 3442 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3443 if (blkaddr < 0) 3444 return NIX_AF_ERR_AF_LF_INVALID; 3445 3446 block = &hw->block[blkaddr]; 3447 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 3448 if (nixlf < 0) 3449 return NIX_AF_ERR_AF_LF_INVALID; 3450 3451 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 3452 3453 if (enable) 3454 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 3455 else 3456 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 3457 3458 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 3459 3460 return 0; 3461 } 3462 3463 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 3464 struct msg_rsp *rsp) 3465 { 3466 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 3467 } 3468 3469 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 3470 struct msg_rsp *rsp) 3471 { 3472 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 3473 } 3474 3475 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 3476 struct nix_lso_format_cfg *req, 3477 struct nix_lso_format_cfg_rsp *rsp) 3478 { 3479 u16 pcifunc = req->hdr.pcifunc; 3480 struct nix_hw *nix_hw; 3481 struct rvu_pfvf *pfvf; 3482 int blkaddr, idx, f; 3483 u64 reg; 3484 3485 pfvf = rvu_get_pfvf(rvu, pcifunc); 3486 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3487 if (!pfvf->nixlf || blkaddr < 0) 3488 return NIX_AF_ERR_AF_LF_INVALID; 3489 3490 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3491 if (!nix_hw) 3492 return -EINVAL; 3493 3494 /* Find existing matching LSO format, if any */ 3495 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 3496 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 3497 reg = rvu_read64(rvu, blkaddr, 3498 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 3499 if (req->fields[f] != (reg & req->field_mask)) 3500 break; 3501 } 3502 3503 if (f == NIX_LSO_FIELD_MAX) 3504 break; 3505 } 3506 3507 if (idx < nix_hw->lso.in_use) { 3508 /* Match found */ 3509 rsp->lso_format_idx = idx; 3510 return 0; 3511 } 3512 3513 if (nix_hw->lso.in_use == nix_hw->lso.total) 3514 return NIX_AF_ERR_LSO_CFG_FAIL; 3515 3516 rsp->lso_format_idx = nix_hw->lso.in_use++; 3517 3518 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 3519 rvu_write64(rvu, blkaddr, 3520 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 3521 req->fields[f]); 3522 3523 return 0; 3524 } 3525