1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 14 #include "rvu_struct.h" 15 #include "rvu_reg.h" 16 #include "rvu.h" 17 #include "npc.h" 18 #include "cgx.h" 19 20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 22 int type, int chan_id); 23 24 enum mc_tbl_sz { 25 MC_TBL_SZ_256, 26 MC_TBL_SZ_512, 27 MC_TBL_SZ_1K, 28 MC_TBL_SZ_2K, 29 MC_TBL_SZ_4K, 30 MC_TBL_SZ_8K, 31 MC_TBL_SZ_16K, 32 MC_TBL_SZ_32K, 33 MC_TBL_SZ_64K, 34 }; 35 36 enum mc_buf_cnt { 37 MC_BUF_CNT_8, 38 MC_BUF_CNT_16, 39 MC_BUF_CNT_32, 40 MC_BUF_CNT_64, 41 MC_BUF_CNT_128, 42 MC_BUF_CNT_256, 43 MC_BUF_CNT_512, 44 MC_BUF_CNT_1024, 45 MC_BUF_CNT_2048, 46 }; 47 48 enum nix_makr_fmt_indexes { 49 NIX_MARK_CFG_IP_DSCP_RED, 50 NIX_MARK_CFG_IP_DSCP_YELLOW, 51 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 52 NIX_MARK_CFG_IP_ECN_RED, 53 NIX_MARK_CFG_IP_ECN_YELLOW, 54 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 55 NIX_MARK_CFG_VLAN_DEI_RED, 56 NIX_MARK_CFG_VLAN_DEI_YELLOW, 57 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 58 NIX_MARK_CFG_MAX, 59 }; 60 61 /* For now considering MC resources needed for broadcast 62 * pkt replication only. i.e 256 HWVFs + 12 PFs. 63 */ 64 #define MC_TBL_SIZE MC_TBL_SZ_512 65 #define MC_BUF_CNT MC_BUF_CNT_128 66 67 struct mce { 68 struct hlist_node node; 69 u16 pcifunc; 70 }; 71 72 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 73 { 74 int i = 0; 75 76 /*If blkaddr is 0, return the first nix block address*/ 77 if (blkaddr == 0) 78 return rvu->nix_blkaddr[blkaddr]; 79 80 while (i + 1 < MAX_NIX_BLKS) { 81 if (rvu->nix_blkaddr[i] == blkaddr) 82 return rvu->nix_blkaddr[i + 1]; 83 i++; 84 } 85 86 return 0; 87 } 88 89 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 90 { 91 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 92 int blkaddr; 93 94 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 95 if (!pfvf->nixlf || blkaddr < 0) 96 return false; 97 return true; 98 } 99 100 int rvu_get_nixlf_count(struct rvu *rvu) 101 { 102 int blkaddr = 0, max = 0; 103 struct rvu_block *block; 104 105 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 106 while (blkaddr) { 107 block = &rvu->hw->block[blkaddr]; 108 max += block->lf.max; 109 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 110 } 111 return max; 112 } 113 114 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 115 { 116 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 117 struct rvu_hwinfo *hw = rvu->hw; 118 int blkaddr; 119 120 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 121 if (!pfvf->nixlf || blkaddr < 0) 122 return NIX_AF_ERR_AF_LF_INVALID; 123 124 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 125 if (*nixlf < 0) 126 return NIX_AF_ERR_AF_LF_INVALID; 127 128 if (nix_blkaddr) 129 *nix_blkaddr = blkaddr; 130 131 return 0; 132 } 133 134 static void nix_mce_list_init(struct nix_mce_list *list, int max) 135 { 136 INIT_HLIST_HEAD(&list->head); 137 list->count = 0; 138 list->max = max; 139 } 140 141 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 142 { 143 int idx; 144 145 if (!mcast) 146 return 0; 147 148 idx = mcast->next_free_mce; 149 mcast->next_free_mce += count; 150 return 
idx; 151 } 152 153 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 154 { 155 int nix_blkaddr = 0, i = 0; 156 struct rvu *rvu = hw->rvu; 157 158 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 159 while (nix_blkaddr) { 160 if (blkaddr == nix_blkaddr && hw->nix) 161 return &hw->nix[i]; 162 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 163 i++; 164 } 165 return NULL; 166 } 167 168 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 169 { 170 int err; 171 172 /*Sync all in flight RX packets to LLC/DRAM */ 173 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 174 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 175 if (err) 176 dev_err(rvu->dev, "NIX RX software sync failed\n"); 177 } 178 179 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 180 int lvl, u16 pcifunc, u16 schq) 181 { 182 struct rvu_hwinfo *hw = rvu->hw; 183 struct nix_txsch *txsch; 184 struct nix_hw *nix_hw; 185 u16 map_func; 186 187 nix_hw = get_nix_hw(rvu->hw, blkaddr); 188 if (!nix_hw) 189 return false; 190 191 txsch = &nix_hw->txsch[lvl]; 192 /* Check out of bounds */ 193 if (schq >= txsch->schq.max) 194 return false; 195 196 mutex_lock(&rvu->rsrc_lock); 197 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 198 mutex_unlock(&rvu->rsrc_lock); 199 200 /* TLs aggegating traffic are shared across PF and VFs */ 201 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 202 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 203 return false; 204 else 205 return true; 206 } 207 208 if (map_func != pcifunc) 209 return false; 210 211 return true; 212 } 213 214 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) 215 { 216 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 217 int pkind, pf, vf, lbkid; 218 u8 cgx_id, lmac_id; 219 int err; 220 221 pf = rvu_get_pf(pcifunc); 222 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 223 return 0; 224 225 switch (type) { 226 case NIX_INTF_TYPE_CGX: 227 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 228 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 229 230 pkind = rvu_npc_get_pkind(rvu, pf); 231 if (pkind < 0) { 232 dev_err(rvu->dev, 233 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 234 return -EINVAL; 235 } 236 pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); 237 pfvf->tx_chan_base = pfvf->rx_chan_base; 238 pfvf->rx_chan_cnt = 1; 239 pfvf->tx_chan_cnt = 1; 240 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 241 rvu_npc_set_pkind(rvu, pkind, pfvf); 242 243 /* By default we enable pause frames */ 244 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) 245 cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), 246 lmac_id, true, true); 247 break; 248 case NIX_INTF_TYPE_LBK: 249 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 250 251 /* If NIX1 block is present on the silicon then NIXes are 252 * assigned alternatively for lbk interfaces. NIX0 should 253 * send packets on lbk link 1 channels and NIX1 should send 254 * on lbk link 0 channels for the communication between 255 * NIX0 and NIX1. 256 */ 257 lbkid = 0; 258 if (rvu->hw->lbk_links > 1) 259 lbkid = vf & 0x1 ? 0 : 1; 260 261 /* Note that AF's VFs work in pairs and talk over consecutive 262 * loopback channels.Therefore if odd number of AF VFs are 263 * enabled then the last VF remains with no pair. 264 */ 265 pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); 266 pfvf->tx_chan_base = vf & 0x1 ? 
267 NIX_CHAN_LBK_CHX(lbkid, vf - 1) : 268 NIX_CHAN_LBK_CHX(lbkid, vf + 1); 269 pfvf->rx_chan_cnt = 1; 270 pfvf->tx_chan_cnt = 1; 271 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 272 pfvf->rx_chan_base, false); 273 break; 274 } 275 276 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 277 * RVU PF/VF's MAC address. 278 */ 279 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 280 pfvf->rx_chan_base, pfvf->mac_addr); 281 282 /* Add this PF_FUNC to bcast pkt replication list */ 283 err = nix_update_bcast_mce_list(rvu, pcifunc, true); 284 if (err) { 285 dev_err(rvu->dev, 286 "Bcast list, failed to enable PF_FUNC 0x%x\n", 287 pcifunc); 288 return err; 289 } 290 291 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 292 nixlf, pfvf->rx_chan_base); 293 pfvf->maxlen = NIC_HW_MIN_FRS; 294 pfvf->minlen = NIC_HW_MIN_FRS; 295 296 return 0; 297 } 298 299 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 300 { 301 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 302 int err; 303 304 pfvf->maxlen = 0; 305 pfvf->minlen = 0; 306 307 /* Remove this PF_FUNC from bcast pkt replication list */ 308 err = nix_update_bcast_mce_list(rvu, pcifunc, false); 309 if (err) { 310 dev_err(rvu->dev, 311 "Bcast list, failed to disable PF_FUNC 0x%x\n", 312 pcifunc); 313 } 314 315 /* Free and disable any MCAM entries used by this NIX LF */ 316 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 317 } 318 319 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 320 struct nix_bp_cfg_req *req, 321 struct msg_rsp *rsp) 322 { 323 u16 pcifunc = req->hdr.pcifunc; 324 struct rvu_pfvf *pfvf; 325 int blkaddr, pf, type; 326 u16 chan_base, chan; 327 u64 cfg; 328 329 pf = rvu_get_pf(pcifunc); 330 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 331 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 332 return 0; 333 334 pfvf = rvu_get_pfvf(rvu, pcifunc); 335 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 336 337 chan_base = pfvf->rx_chan_base + req->chan_base; 338 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 339 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 340 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 341 cfg & ~BIT_ULL(16)); 342 } 343 return 0; 344 } 345 346 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 347 int type, int chan_id) 348 { 349 int bpid, blkaddr, lmac_chan_cnt; 350 struct rvu_hwinfo *hw = rvu->hw; 351 u16 cgx_bpid_cnt, lbk_bpid_cnt; 352 struct rvu_pfvf *pfvf; 353 u8 cgx_id, lmac_id; 354 u64 cfg; 355 356 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 357 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 358 lmac_chan_cnt = cfg & 0xFF; 359 360 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 361 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 362 363 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 364 365 /* Backpressure IDs range division 366 * CGX channles are mapped to (0 - 191) BPIDs 367 * LBK channles are mapped to (192 - 255) BPIDs 368 * SDP channles are mapped to (256 - 511) BPIDs 369 * 370 * Lmac channles and bpids mapped as follows 371 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 372 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 373 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 
374 */ 375 switch (type) { 376 case NIX_INTF_TYPE_CGX: 377 if ((req->chan_base + req->chan_cnt) > 15) 378 return -EINVAL; 379 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 380 /* Assign bpid based on cgx, lmac and chan id */ 381 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 382 (lmac_id * lmac_chan_cnt) + req->chan_base; 383 384 if (req->bpid_per_chan) 385 bpid += chan_id; 386 if (bpid > cgx_bpid_cnt) 387 return -EINVAL; 388 break; 389 390 case NIX_INTF_TYPE_LBK: 391 if ((req->chan_base + req->chan_cnt) > 63) 392 return -EINVAL; 393 bpid = cgx_bpid_cnt + req->chan_base; 394 if (req->bpid_per_chan) 395 bpid += chan_id; 396 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 397 return -EINVAL; 398 break; 399 default: 400 return -EINVAL; 401 } 402 return bpid; 403 } 404 405 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 406 struct nix_bp_cfg_req *req, 407 struct nix_bp_cfg_rsp *rsp) 408 { 409 int blkaddr, pf, type, chan_id = 0; 410 u16 pcifunc = req->hdr.pcifunc; 411 struct rvu_pfvf *pfvf; 412 u16 chan_base, chan; 413 s16 bpid, bpid_base; 414 u64 cfg; 415 416 pf = rvu_get_pf(pcifunc); 417 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 418 419 /* Enable backpressure only for CGX mapped PFs and LBK interface */ 420 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 421 return 0; 422 423 pfvf = rvu_get_pfvf(rvu, pcifunc); 424 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 425 426 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 427 chan_base = pfvf->rx_chan_base + req->chan_base; 428 bpid = bpid_base; 429 430 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 431 if (bpid < 0) { 432 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 433 return -EINVAL; 434 } 435 436 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 437 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 438 cfg | (bpid & 0xFF) | BIT_ULL(16)); 439 chan_id++; 440 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 441 } 442 443 for (chan = 0; chan < req->chan_cnt; chan++) { 444 /* Map channel and bpid assign to it */ 445 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 446 (bpid_base & 0x3FF); 447 if (req->bpid_per_chan) 448 bpid_base++; 449 } 450 rsp->chan_cnt = req->chan_cnt; 451 452 return 0; 453 } 454 455 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 456 u64 format, bool v4, u64 *fidx) 457 { 458 struct nix_lso_format field = {0}; 459 460 /* IP's Length field */ 461 field.layer = NIX_TXLAYER_OL3; 462 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 463 field.offset = v4 ? 
2 : 4; 464 field.sizem1 = 1; /* i.e 2 bytes */ 465 field.alg = NIX_LSOALG_ADD_PAYLEN; 466 rvu_write64(rvu, blkaddr, 467 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 468 *(u64 *)&field); 469 470 /* No ID field in IPv6 header */ 471 if (!v4) 472 return; 473 474 /* IP's ID field */ 475 field.layer = NIX_TXLAYER_OL3; 476 field.offset = 4; 477 field.sizem1 = 1; /* i.e 2 bytes */ 478 field.alg = NIX_LSOALG_ADD_SEGNUM; 479 rvu_write64(rvu, blkaddr, 480 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 481 *(u64 *)&field); 482 } 483 484 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 485 u64 format, u64 *fidx) 486 { 487 struct nix_lso_format field = {0}; 488 489 /* TCP's sequence number field */ 490 field.layer = NIX_TXLAYER_OL4; 491 field.offset = 4; 492 field.sizem1 = 3; /* i.e 4 bytes */ 493 field.alg = NIX_LSOALG_ADD_OFFSET; 494 rvu_write64(rvu, blkaddr, 495 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 496 *(u64 *)&field); 497 498 /* TCP's flags field */ 499 field.layer = NIX_TXLAYER_OL4; 500 field.offset = 12; 501 field.sizem1 = 1; /* 2 bytes */ 502 field.alg = NIX_LSOALG_TCP_FLAGS; 503 rvu_write64(rvu, blkaddr, 504 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 505 *(u64 *)&field); 506 } 507 508 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 509 { 510 u64 cfg, idx, fidx = 0; 511 512 /* Get max HW supported format indices */ 513 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 514 nix_hw->lso.total = cfg; 515 516 /* Enable LSO */ 517 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 518 /* For TSO, set first and middle segment flags to 519 * mask out PSH, RST & FIN flags in TCP packet 520 */ 521 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 522 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 523 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 524 525 /* Setup default static LSO formats 526 * 527 * Configure format fields for TCPv4 segmentation offload 528 */ 529 idx = NIX_LSO_FORMAT_IDX_TSOV4; 530 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 531 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 532 533 /* Set rest of the fields to NOP */ 534 for (; fidx < 8; fidx++) { 535 rvu_write64(rvu, blkaddr, 536 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 537 } 538 nix_hw->lso.in_use++; 539 540 /* Configure format fields for TCPv6 segmentation offload */ 541 idx = NIX_LSO_FORMAT_IDX_TSOV6; 542 fidx = 0; 543 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 544 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 545 546 /* Set rest of the fields to NOP */ 547 for (; fidx < 8; fidx++) { 548 rvu_write64(rvu, blkaddr, 549 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 550 } 551 nix_hw->lso.in_use++; 552 } 553 554 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 555 { 556 kfree(pfvf->rq_bmap); 557 kfree(pfvf->sq_bmap); 558 kfree(pfvf->cq_bmap); 559 if (pfvf->rq_ctx) 560 qmem_free(rvu->dev, pfvf->rq_ctx); 561 if (pfvf->sq_ctx) 562 qmem_free(rvu->dev, pfvf->sq_ctx); 563 if (pfvf->cq_ctx) 564 qmem_free(rvu->dev, pfvf->cq_ctx); 565 if (pfvf->rss_ctx) 566 qmem_free(rvu->dev, pfvf->rss_ctx); 567 if (pfvf->nix_qints_ctx) 568 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 569 if (pfvf->cq_ints_ctx) 570 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 571 572 pfvf->rq_bmap = NULL; 573 pfvf->cq_bmap = NULL; 574 pfvf->sq_bmap = NULL; 575 pfvf->rq_ctx = NULL; 576 pfvf->sq_ctx = NULL; 577 pfvf->cq_ctx = NULL; 578 pfvf->rss_ctx = NULL; 579 pfvf->nix_qints_ctx = NULL; 580 pfvf->cq_ints_ctx = NULL; 581 } 582 583 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 584 struct rvu_pfvf *pfvf, int nixlf, 585 int rss_sz, int rss_grps, int hwctx_size, 586 u64 way_mask) 587 { 588 int err, grp, num_indices; 589 590 /* RSS is not requested for this NIXLF */ 591 if (!rss_sz) 592 return 0; 593 num_indices = rss_sz * rss_grps; 594 595 /* Alloc NIX RSS HW context memory and config the base */ 596 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 597 if (err) 598 return err; 599 600 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 601 (u64)pfvf->rss_ctx->iova); 602 603 /* Config full RSS table size, enable RSS and caching */ 604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), 605 BIT_ULL(36) | BIT_ULL(4) | 606 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | 607 way_mask << 20); 608 /* Config RSS group offset and sizes */ 609 for (grp = 0; grp < rss_grps; grp++) 610 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 611 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 612 return 0; 613 } 614 615 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 616 struct nix_aq_inst_s *inst) 617 { 618 struct admin_queue *aq = block->aq; 619 struct nix_aq_res_s *result; 620 int timeout = 1000; 621 u64 reg, head; 622 623 result = (struct nix_aq_res_s *)aq->res->base; 624 625 /* Get current head pointer where to append this instruction */ 626 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 627 head = (reg >> 4) & AQ_PTR_MASK; 628 629 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 630 (void *)inst, aq->inst->entry_sz); 631 memset(result, 0, sizeof(*result)); 632 /* sync into memory */ 633 wmb(); 634 635 /* Ring the doorbell and wait for result */ 636 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 637 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 638 cpu_relax(); 639 udelay(1); 640 timeout--; 641 if (!timeout) 642 return -EBUSY; 643 } 644 645 if (result->compcode != NIX_AQ_COMP_GOOD) 646 /* TODO: Replace this with some error code */ 647 return -EBUSY; 648 649 return 0; 650 } 651 652 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 653 struct nix_aq_enq_req *req, 654 struct nix_aq_enq_rsp *rsp) 655 { 656 struct rvu_hwinfo *hw = rvu->hw; 657 u16 pcifunc = req->hdr.pcifunc; 658 int nixlf, blkaddr, rc = 0; 659 struct nix_aq_inst_s inst; 660 struct rvu_block *block; 661 struct admin_queue *aq; 662 struct rvu_pfvf *pfvf; 663 void *ctx, *mask; 664 bool ena; 665 u64 cfg; 666 667 blkaddr = nix_hw->blkaddr; 668 block = &hw->block[blkaddr]; 669 aq = block->aq; 670 if (!aq) { 671 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 672 return NIX_AF_ERR_AQ_ENQUEUE; 673 } 674 675 pfvf = rvu_get_pfvf(rvu, pcifunc); 676 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 677 678 /* Skip NIXLF check for broadcast MCE entry init */ 679 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { 680 if (!pfvf->nixlf || nixlf < 0) 681 return NIX_AF_ERR_AF_LF_INVALID; 682 } 683 684 switch (req->ctype) { 685 case NIX_AQ_CTYPE_RQ: 686 /* Check if index exceeds max no of queues */ 687 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 688 rc = NIX_AF_ERR_AQ_ENQUEUE; 689 break; 690 case NIX_AQ_CTYPE_SQ: 691 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 692 rc = NIX_AF_ERR_AQ_ENQUEUE; 693 break; 694 case NIX_AQ_CTYPE_CQ: 695 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 696 rc = NIX_AF_ERR_AQ_ENQUEUE; 697 break; 698 case NIX_AQ_CTYPE_RSS: 699 /* Check if RSS is enabled and qidx is within range */ 700 cfg = rvu_read64(rvu, blkaddr, 
NIX_AF_LFX_RSS_CFG(nixlf)); 701 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 702 (req->qidx >= (256UL << (cfg & 0xF)))) 703 rc = NIX_AF_ERR_AQ_ENQUEUE; 704 break; 705 case NIX_AQ_CTYPE_MCE: 706 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 707 708 /* Check if index exceeds MCE list length */ 709 if (!nix_hw->mcast.mce_ctx || 710 (req->qidx >= (256UL << (cfg & 0xF)))) 711 rc = NIX_AF_ERR_AQ_ENQUEUE; 712 713 /* Adding multicast lists for requests from PF/VFs is not 714 * yet supported, so ignore this. 715 */ 716 if (rsp) 717 rc = NIX_AF_ERR_AQ_ENQUEUE; 718 break; 719 default: 720 rc = NIX_AF_ERR_AQ_ENQUEUE; 721 } 722 723 if (rc) 724 return rc; 725 726 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 727 if (req->ctype == NIX_AQ_CTYPE_SQ && 728 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 729 (req->op == NIX_AQ_INSTOP_WRITE && 730 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 731 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 732 pcifunc, req->sq.smq)) 733 return NIX_AF_ERR_AQ_ENQUEUE; 734 } 735 736 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 737 inst.lf = nixlf; 738 inst.cindex = req->qidx; 739 inst.ctype = req->ctype; 740 inst.op = req->op; 741 /* Currently we are not supporting enqueuing multiple instructions, 742 * so always choose first entry in result memory. 743 */ 744 inst.res_addr = (u64)aq->res->iova; 745 746 /* Hardware uses same aq->res->base for updating result of 747 * previous instruction hence wait here till it is done. 748 */ 749 spin_lock(&aq->lock); 750 751 /* Clean result + context memory */ 752 memset(aq->res->base, 0, aq->res->entry_sz); 753 /* Context needs to be written at RES_ADDR + 128 */ 754 ctx = aq->res->base + 128; 755 /* Mask needs to be written at RES_ADDR + 256 */ 756 mask = aq->res->base + 256; 757 758 switch (req->op) { 759 case NIX_AQ_INSTOP_WRITE: 760 if (req->ctype == NIX_AQ_CTYPE_RQ) 761 memcpy(mask, &req->rq_mask, 762 sizeof(struct nix_rq_ctx_s)); 763 else if (req->ctype == NIX_AQ_CTYPE_SQ) 764 memcpy(mask, &req->sq_mask, 765 sizeof(struct nix_sq_ctx_s)); 766 else if (req->ctype == NIX_AQ_CTYPE_CQ) 767 memcpy(mask, &req->cq_mask, 768 sizeof(struct nix_cq_ctx_s)); 769 else if (req->ctype == NIX_AQ_CTYPE_RSS) 770 memcpy(mask, &req->rss_mask, 771 sizeof(struct nix_rsse_s)); 772 else if (req->ctype == NIX_AQ_CTYPE_MCE) 773 memcpy(mask, &req->mce_mask, 774 sizeof(struct nix_rx_mce_s)); 775 fallthrough; 776 case NIX_AQ_INSTOP_INIT: 777 if (req->ctype == NIX_AQ_CTYPE_RQ) 778 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 779 else if (req->ctype == NIX_AQ_CTYPE_SQ) 780 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 781 else if (req->ctype == NIX_AQ_CTYPE_CQ) 782 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 783 else if (req->ctype == NIX_AQ_CTYPE_RSS) 784 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 785 else if (req->ctype == NIX_AQ_CTYPE_MCE) 786 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 787 break; 788 case NIX_AQ_INSTOP_NOP: 789 case NIX_AQ_INSTOP_READ: 790 case NIX_AQ_INSTOP_LOCK: 791 case NIX_AQ_INSTOP_UNLOCK: 792 break; 793 default: 794 rc = NIX_AF_ERR_AQ_ENQUEUE; 795 spin_unlock(&aq->lock); 796 return rc; 797 } 798 799 /* Submit the instruction to AQ */ 800 rc = nix_aq_enqueue_wait(rvu, block, &inst); 801 if (rc) { 802 spin_unlock(&aq->lock); 803 return rc; 804 } 805 806 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 807 if (req->op == NIX_AQ_INSTOP_INIT) { 808 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 809 __set_bit(req->qidx, pfvf->rq_bmap); 810 if 
(req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 811 __set_bit(req->qidx, pfvf->sq_bmap); 812 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 813 __set_bit(req->qidx, pfvf->cq_bmap); 814 } 815 816 if (req->op == NIX_AQ_INSTOP_WRITE) { 817 if (req->ctype == NIX_AQ_CTYPE_RQ) { 818 ena = (req->rq.ena & req->rq_mask.ena) | 819 (test_bit(req->qidx, pfvf->rq_bmap) & 820 ~req->rq_mask.ena); 821 if (ena) 822 __set_bit(req->qidx, pfvf->rq_bmap); 823 else 824 __clear_bit(req->qidx, pfvf->rq_bmap); 825 } 826 if (req->ctype == NIX_AQ_CTYPE_SQ) { 827 ena = (req->rq.ena & req->sq_mask.ena) | 828 (test_bit(req->qidx, pfvf->sq_bmap) & 829 ~req->sq_mask.ena); 830 if (ena) 831 __set_bit(req->qidx, pfvf->sq_bmap); 832 else 833 __clear_bit(req->qidx, pfvf->sq_bmap); 834 } 835 if (req->ctype == NIX_AQ_CTYPE_CQ) { 836 ena = (req->rq.ena & req->cq_mask.ena) | 837 (test_bit(req->qidx, pfvf->cq_bmap) & 838 ~req->cq_mask.ena); 839 if (ena) 840 __set_bit(req->qidx, pfvf->cq_bmap); 841 else 842 __clear_bit(req->qidx, pfvf->cq_bmap); 843 } 844 } 845 846 if (rsp) { 847 /* Copy read context into mailbox */ 848 if (req->op == NIX_AQ_INSTOP_READ) { 849 if (req->ctype == NIX_AQ_CTYPE_RQ) 850 memcpy(&rsp->rq, ctx, 851 sizeof(struct nix_rq_ctx_s)); 852 else if (req->ctype == NIX_AQ_CTYPE_SQ) 853 memcpy(&rsp->sq, ctx, 854 sizeof(struct nix_sq_ctx_s)); 855 else if (req->ctype == NIX_AQ_CTYPE_CQ) 856 memcpy(&rsp->cq, ctx, 857 sizeof(struct nix_cq_ctx_s)); 858 else if (req->ctype == NIX_AQ_CTYPE_RSS) 859 memcpy(&rsp->rss, ctx, 860 sizeof(struct nix_rsse_s)); 861 else if (req->ctype == NIX_AQ_CTYPE_MCE) 862 memcpy(&rsp->mce, ctx, 863 sizeof(struct nix_rx_mce_s)); 864 } 865 } 866 867 spin_unlock(&aq->lock); 868 return 0; 869 } 870 871 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 872 struct nix_aq_enq_rsp *rsp) 873 { 874 struct nix_hw *nix_hw; 875 int blkaddr; 876 877 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 878 if (blkaddr < 0) 879 return NIX_AF_ERR_AF_LF_INVALID; 880 881 nix_hw = get_nix_hw(rvu->hw, blkaddr); 882 if (!nix_hw) 883 return -EINVAL; 884 885 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 886 } 887 888 static const char *nix_get_ctx_name(int ctype) 889 { 890 switch (ctype) { 891 case NIX_AQ_CTYPE_CQ: 892 return "CQ"; 893 case NIX_AQ_CTYPE_SQ: 894 return "SQ"; 895 case NIX_AQ_CTYPE_RQ: 896 return "RQ"; 897 case NIX_AQ_CTYPE_RSS: 898 return "RSS"; 899 } 900 return ""; 901 } 902 903 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 904 { 905 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 906 struct nix_aq_enq_req aq_req; 907 unsigned long *bmap; 908 int qidx, q_cnt = 0; 909 int err = 0, rc; 910 911 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 912 return NIX_AF_ERR_AQ_ENQUEUE; 913 914 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 915 aq_req.hdr.pcifunc = req->hdr.pcifunc; 916 917 if (req->ctype == NIX_AQ_CTYPE_CQ) { 918 aq_req.cq.ena = 0; 919 aq_req.cq_mask.ena = 1; 920 aq_req.cq.bp_ena = 0; 921 aq_req.cq_mask.bp_ena = 1; 922 q_cnt = pfvf->cq_ctx->qsize; 923 bmap = pfvf->cq_bmap; 924 } 925 if (req->ctype == NIX_AQ_CTYPE_SQ) { 926 aq_req.sq.ena = 0; 927 aq_req.sq_mask.ena = 1; 928 q_cnt = pfvf->sq_ctx->qsize; 929 bmap = pfvf->sq_bmap; 930 } 931 if (req->ctype == NIX_AQ_CTYPE_RQ) { 932 aq_req.rq.ena = 0; 933 aq_req.rq_mask.ena = 1; 934 q_cnt = pfvf->rq_ctx->qsize; 935 bmap = pfvf->rq_bmap; 936 } 937 938 aq_req.ctype = req->ctype; 939 aq_req.op = NIX_AQ_INSTOP_WRITE; 940 941 for (qidx = 0; qidx < q_cnt; 
qidx++) { 942 if (!test_bit(qidx, bmap)) 943 continue; 944 aq_req.qidx = qidx; 945 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 946 if (rc) { 947 err = rc; 948 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 949 nix_get_ctx_name(req->ctype), qidx); 950 } 951 } 952 953 return err; 954 } 955 956 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 957 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 958 { 959 struct nix_aq_enq_req lock_ctx_req; 960 int err; 961 962 if (req->op != NIX_AQ_INSTOP_INIT) 963 return 0; 964 965 if (req->ctype == NIX_AQ_CTYPE_MCE || 966 req->ctype == NIX_AQ_CTYPE_DYNO) 967 return 0; 968 969 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 970 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 971 lock_ctx_req.ctype = req->ctype; 972 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 973 lock_ctx_req.qidx = req->qidx; 974 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 975 if (err) 976 dev_err(rvu->dev, 977 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 978 req->hdr.pcifunc, 979 nix_get_ctx_name(req->ctype), req->qidx); 980 return err; 981 } 982 983 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 984 struct nix_aq_enq_req *req, 985 struct nix_aq_enq_rsp *rsp) 986 { 987 int err; 988 989 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 990 if (!err) 991 err = nix_lf_hwctx_lockdown(rvu, req); 992 return err; 993 } 994 #else 995 996 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 997 struct nix_aq_enq_req *req, 998 struct nix_aq_enq_rsp *rsp) 999 { 1000 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1001 } 1002 #endif 1003 1004 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1005 struct hwctx_disable_req *req, 1006 struct msg_rsp *rsp) 1007 { 1008 return nix_lf_hwctx_disable(rvu, req); 1009 } 1010 1011 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1012 struct nix_lf_alloc_req *req, 1013 struct nix_lf_alloc_rsp *rsp) 1014 { 1015 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1016 struct rvu_hwinfo *hw = rvu->hw; 1017 u16 pcifunc = req->hdr.pcifunc; 1018 struct rvu_block *block; 1019 struct rvu_pfvf *pfvf; 1020 u64 cfg, ctx_cfg; 1021 int blkaddr; 1022 1023 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1024 return NIX_AF_ERR_PARAM; 1025 1026 if (req->way_mask) 1027 req->way_mask &= 0xFFFF; 1028 1029 pfvf = rvu_get_pfvf(rvu, pcifunc); 1030 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1031 if (!pfvf->nixlf || blkaddr < 0) 1032 return NIX_AF_ERR_AF_LF_INVALID; 1033 1034 block = &hw->block[blkaddr]; 1035 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1036 if (nixlf < 0) 1037 return NIX_AF_ERR_AF_LF_INVALID; 1038 1039 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1040 if (req->npa_func) { 1041 /* If default, use 'this' NIXLF's PFFUNC */ 1042 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1043 req->npa_func = pcifunc; 1044 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1045 return NIX_AF_INVAL_NPA_PF_FUNC; 1046 } 1047 1048 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1049 if (req->sso_func) { 1050 /* If default, use 'this' NIXLF's PFFUNC */ 1051 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1052 req->sso_func = pcifunc; 1053 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1054 return NIX_AF_INVAL_SSO_PF_FUNC; 1055 } 1056 1057 /* If RSS is being enabled, check if requested config is valid. 1058 * RSS table size should be power of two, otherwise 1059 * RSS_GRP::OFFSET + adder might go beyond that group or 1060 * won't be able to use entire table. 
1061 */ 1062 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1063 !is_power_of_2(req->rss_sz))) 1064 return NIX_AF_ERR_RSS_SIZE_INVALID; 1065 1066 if (req->rss_sz && 1067 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1068 return NIX_AF_ERR_RSS_GRPS_INVALID; 1069 1070 /* Reset this NIX LF */ 1071 err = rvu_lf_reset(rvu, block, nixlf); 1072 if (err) { 1073 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1074 block->addr - BLKADDR_NIX0, nixlf); 1075 return NIX_AF_ERR_LF_RESET; 1076 } 1077 1078 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1079 1080 /* Alloc NIX RQ HW context memory and config the base */ 1081 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1082 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1083 if (err) 1084 goto free_mem; 1085 1086 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1087 if (!pfvf->rq_bmap) 1088 goto free_mem; 1089 1090 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1091 (u64)pfvf->rq_ctx->iova); 1092 1093 /* Set caching and queue count in HW */ 1094 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1095 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1096 1097 /* Alloc NIX SQ HW context memory and config the base */ 1098 hwctx_size = 1UL << (ctx_cfg & 0xF); 1099 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1100 if (err) 1101 goto free_mem; 1102 1103 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1104 if (!pfvf->sq_bmap) 1105 goto free_mem; 1106 1107 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1108 (u64)pfvf->sq_ctx->iova); 1109 1110 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1111 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1112 1113 /* Alloc NIX CQ HW context memory and config the base */ 1114 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1115 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1116 if (err) 1117 goto free_mem; 1118 1119 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1120 if (!pfvf->cq_bmap) 1121 goto free_mem; 1122 1123 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1124 (u64)pfvf->cq_ctx->iova); 1125 1126 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1127 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1128 1129 /* Initialize receive side scaling (RSS) */ 1130 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1131 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1132 req->rss_grps, hwctx_size, req->way_mask); 1133 if (err) 1134 goto free_mem; 1135 1136 /* Alloc memory for CQINT's HW contexts */ 1137 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1138 qints = (cfg >> 24) & 0xFFF; 1139 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1140 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1141 if (err) 1142 goto free_mem; 1143 1144 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1145 (u64)pfvf->cq_ints_ctx->iova); 1146 1147 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1148 BIT_ULL(36) | req->way_mask << 20); 1149 1150 /* Alloc memory for QINT's HW contexts */ 1151 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1152 qints = (cfg >> 12) & 0xFFF; 1153 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1154 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1155 if (err) 1156 goto free_mem; 1157 1158 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1159 (u64)pfvf->nix_qints_ctx->iova); 1160 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1161 
BIT_ULL(36) | req->way_mask << 20); 1162 1163 /* Setup VLANX TPID's. 1164 * Use VLAN1 for 802.1Q 1165 * and VLAN0 for 802.1AD. 1166 */ 1167 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1168 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1169 1170 /* Enable LMTST for this NIX LF */ 1171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1172 1173 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1174 if (req->npa_func) 1175 cfg = req->npa_func; 1176 if (req->sso_func) 1177 cfg |= (u64)req->sso_func << 16; 1178 1179 cfg |= (u64)req->xqe_sz << 33; 1180 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1181 1182 /* Config Rx pkt length, csum checks and apad enable / disable */ 1183 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1184 1185 /* Configure pkind for TX parse config */ 1186 cfg = NPC_TX_DEF_PKIND; 1187 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1188 1189 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1190 err = nix_interface_init(rvu, pcifunc, intf, nixlf); 1191 if (err) 1192 goto free_mem; 1193 1194 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1195 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1196 1197 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1198 rvu_write64(rvu, blkaddr, 1199 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1200 VTAGSIZE_T4 | VTAG_STRIP); 1201 1202 goto exit; 1203 1204 free_mem: 1205 nix_ctx_free(rvu, pfvf); 1206 rc = -ENOMEM; 1207 1208 exit: 1209 /* Set macaddr of this PF/VF */ 1210 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1211 1212 /* set SQB size info */ 1213 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1214 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1215 rsp->rx_chan_base = pfvf->rx_chan_base; 1216 rsp->tx_chan_base = pfvf->tx_chan_base; 1217 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1218 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1219 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1220 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1221 /* Get HW supported stat count */ 1222 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1223 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1224 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1225 /* Get count of CQ IRQs and error IRQs supported per LF */ 1226 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1227 rsp->qints = ((cfg >> 12) & 0xFFF); 1228 rsp->cints = ((cfg >> 24) & 0xFFF); 1229 rsp->cgx_links = hw->cgx_links; 1230 rsp->lbk_links = hw->lbk_links; 1231 rsp->sdp_links = hw->sdp_links; 1232 1233 return rc; 1234 } 1235 1236 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1237 struct msg_rsp *rsp) 1238 { 1239 struct rvu_hwinfo *hw = rvu->hw; 1240 u16 pcifunc = req->hdr.pcifunc; 1241 struct rvu_block *block; 1242 int blkaddr, nixlf, err; 1243 struct rvu_pfvf *pfvf; 1244 1245 pfvf = rvu_get_pfvf(rvu, pcifunc); 1246 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1247 if (!pfvf->nixlf || blkaddr < 0) 1248 return NIX_AF_ERR_AF_LF_INVALID; 1249 1250 block = &hw->block[blkaddr]; 1251 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1252 if (nixlf < 0) 1253 return NIX_AF_ERR_AF_LF_INVALID; 1254 1255 if (req->flags & NIX_LF_DISABLE_FLOWS) 1256 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1257 else 1258 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1259 1260 /* Free any tx vtag def entries used by this NIX LF */ 1261 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1262 nix_free_tx_vtag_entries(rvu, pcifunc); 1263 1264 nix_interface_deinit(rvu, pcifunc, 
nixlf); 1265 1266 /* Reset this NIX LF */ 1267 err = rvu_lf_reset(rvu, block, nixlf); 1268 if (err) { 1269 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1270 block->addr - BLKADDR_NIX0, nixlf); 1271 return NIX_AF_ERR_LF_RESET; 1272 } 1273 1274 nix_ctx_free(rvu, pfvf); 1275 1276 return 0; 1277 } 1278 1279 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1280 struct nix_mark_format_cfg *req, 1281 struct nix_mark_format_cfg_rsp *rsp) 1282 { 1283 u16 pcifunc = req->hdr.pcifunc; 1284 struct nix_hw *nix_hw; 1285 struct rvu_pfvf *pfvf; 1286 int blkaddr, rc; 1287 u32 cfg; 1288 1289 pfvf = rvu_get_pfvf(rvu, pcifunc); 1290 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1291 if (!pfvf->nixlf || blkaddr < 0) 1292 return NIX_AF_ERR_AF_LF_INVALID; 1293 1294 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1295 if (!nix_hw) 1296 return -EINVAL; 1297 1298 cfg = (((u32)req->offset & 0x7) << 16) | 1299 (((u32)req->y_mask & 0xF) << 12) | 1300 (((u32)req->y_val & 0xF) << 8) | 1301 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1302 1303 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1304 if (rc < 0) { 1305 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1306 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1307 return NIX_AF_ERR_MARK_CFG_FAIL; 1308 } 1309 1310 rsp->mark_format_idx = rc; 1311 return 0; 1312 } 1313 1314 /* Disable shaping of pkts by a scheduler queue 1315 * at a given scheduler level. 1316 */ 1317 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1318 int lvl, int schq) 1319 { 1320 u64 cir_reg = 0, pir_reg = 0; 1321 u64 cfg; 1322 1323 switch (lvl) { 1324 case NIX_TXSCH_LVL_TL1: 1325 cir_reg = NIX_AF_TL1X_CIR(schq); 1326 pir_reg = 0; /* PIR not available at TL1 */ 1327 break; 1328 case NIX_TXSCH_LVL_TL2: 1329 cir_reg = NIX_AF_TL2X_CIR(schq); 1330 pir_reg = NIX_AF_TL2X_PIR(schq); 1331 break; 1332 case NIX_TXSCH_LVL_TL3: 1333 cir_reg = NIX_AF_TL3X_CIR(schq); 1334 pir_reg = NIX_AF_TL3X_PIR(schq); 1335 break; 1336 case NIX_TXSCH_LVL_TL4: 1337 cir_reg = NIX_AF_TL4X_CIR(schq); 1338 pir_reg = NIX_AF_TL4X_PIR(schq); 1339 break; 1340 } 1341 1342 if (!cir_reg) 1343 return; 1344 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1345 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1346 1347 if (!pir_reg) 1348 return; 1349 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1350 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1351 } 1352 1353 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1354 int lvl, int schq) 1355 { 1356 struct rvu_hwinfo *hw = rvu->hw; 1357 int link; 1358 1359 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1360 return; 1361 1362 /* Reset TL4's SDP link config */ 1363 if (lvl == NIX_TXSCH_LVL_TL4) 1364 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1365 1366 if (lvl != NIX_TXSCH_LVL_TL2) 1367 return; 1368 1369 /* Reset TL2's CGX or LBK link config */ 1370 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1371 rvu_write64(rvu, blkaddr, 1372 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1373 } 1374 1375 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1376 { 1377 struct rvu_hwinfo *hw = rvu->hw; 1378 int pf = rvu_get_pf(pcifunc); 1379 u8 cgx_id = 0, lmac_id = 0; 1380 1381 if (is_afvf(pcifunc)) {/* LBK links */ 1382 return hw->cgx_links; 1383 } else if (is_pf_cgxmapped(rvu, pf)) { 1384 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1385 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1386 } 1387 1388 /* SDP link */ 1389 return hw->cgx_links + hw->lbk_links; 1390 } 1391 1392 
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1393 int link, int *start, int *end) 1394 { 1395 struct rvu_hwinfo *hw = rvu->hw; 1396 int pf = rvu_get_pf(pcifunc); 1397 1398 if (is_afvf(pcifunc)) { /* LBK links */ 1399 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1400 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1401 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1402 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1403 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1404 } else { /* SDP link */ 1405 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1406 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1407 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1408 } 1409 } 1410 1411 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1412 struct nix_hw *nix_hw, 1413 struct nix_txsch_alloc_req *req) 1414 { 1415 struct rvu_hwinfo *hw = rvu->hw; 1416 int schq, req_schq, free_cnt; 1417 struct nix_txsch *txsch; 1418 int link, start, end; 1419 1420 txsch = &nix_hw->txsch[lvl]; 1421 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1422 1423 if (!req_schq) 1424 return 0; 1425 1426 link = nix_get_tx_link(rvu, pcifunc); 1427 1428 /* For traffic aggregating scheduler level, one queue is enough */ 1429 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1430 if (req_schq != 1) 1431 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1432 return 0; 1433 } 1434 1435 /* Get free SCHQ count and check if request can be accomodated */ 1436 if (hw->cap.nix_fixed_txschq_mapping) { 1437 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1438 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1439 if (end <= txsch->schq.max && schq < end && 1440 !test_bit(schq, txsch->schq.bmap)) 1441 free_cnt = 1; 1442 else 1443 free_cnt = 0; 1444 } else { 1445 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1446 } 1447 1448 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) 1449 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1450 1451 /* If contiguous queues are needed, check for availability */ 1452 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1453 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1454 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1455 1456 return 0; 1457 } 1458 1459 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1460 struct nix_txsch_alloc_rsp *rsp, 1461 int lvl, int start, int end) 1462 { 1463 struct rvu_hwinfo *hw = rvu->hw; 1464 u16 pcifunc = rsp->hdr.pcifunc; 1465 int idx, schq; 1466 1467 /* For traffic aggregating levels, queue alloc is based 1468 * on transmit link to which PF_FUNC is mapped to. 1469 */ 1470 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1471 /* A single TL queue is allocated */ 1472 if (rsp->schq_contig[lvl]) { 1473 rsp->schq_contig[lvl] = 1; 1474 rsp->schq_contig_list[lvl][0] = start; 1475 } 1476 1477 /* Both contig and non-contig reqs doesn't make sense here */ 1478 if (rsp->schq_contig[lvl]) 1479 rsp->schq[lvl] = 0; 1480 1481 if (rsp->schq[lvl]) { 1482 rsp->schq[lvl] = 1; 1483 rsp->schq_list[lvl][0] = start; 1484 } 1485 return; 1486 } 1487 1488 /* Adjust the queue request count if HW supports 1489 * only one queue per level configuration. 
1490 */ 1491 if (hw->cap.nix_fixed_txschq_mapping) { 1492 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1493 schq = start + idx; 1494 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1495 rsp->schq_contig[lvl] = 0; 1496 rsp->schq[lvl] = 0; 1497 return; 1498 } 1499 1500 if (rsp->schq_contig[lvl]) { 1501 rsp->schq_contig[lvl] = 1; 1502 set_bit(schq, txsch->schq.bmap); 1503 rsp->schq_contig_list[lvl][0] = schq; 1504 rsp->schq[lvl] = 0; 1505 } else if (rsp->schq[lvl]) { 1506 rsp->schq[lvl] = 1; 1507 set_bit(schq, txsch->schq.bmap); 1508 rsp->schq_list[lvl][0] = schq; 1509 } 1510 return; 1511 } 1512 1513 /* Allocate contiguous queue indices requesty first */ 1514 if (rsp->schq_contig[lvl]) { 1515 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 1516 txsch->schq.max, start, 1517 rsp->schq_contig[lvl], 0); 1518 if (schq >= end) 1519 rsp->schq_contig[lvl] = 0; 1520 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 1521 set_bit(schq, txsch->schq.bmap); 1522 rsp->schq_contig_list[lvl][idx] = schq; 1523 schq++; 1524 } 1525 } 1526 1527 /* Allocate non-contiguous queue indices */ 1528 if (rsp->schq[lvl]) { 1529 idx = 0; 1530 for (schq = start; schq < end; schq++) { 1531 if (!test_bit(schq, txsch->schq.bmap)) { 1532 set_bit(schq, txsch->schq.bmap); 1533 rsp->schq_list[lvl][idx++] = schq; 1534 } 1535 if (idx == rsp->schq[lvl]) 1536 break; 1537 } 1538 /* Update how many were allocated */ 1539 rsp->schq[lvl] = idx; 1540 } 1541 } 1542 1543 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 1544 struct nix_txsch_alloc_req *req, 1545 struct nix_txsch_alloc_rsp *rsp) 1546 { 1547 struct rvu_hwinfo *hw = rvu->hw; 1548 u16 pcifunc = req->hdr.pcifunc; 1549 int link, blkaddr, rc = 0; 1550 int lvl, idx, start, end; 1551 struct nix_txsch *txsch; 1552 struct rvu_pfvf *pfvf; 1553 struct nix_hw *nix_hw; 1554 u32 *pfvf_map; 1555 u16 schq; 1556 1557 pfvf = rvu_get_pfvf(rvu, pcifunc); 1558 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1559 if (!pfvf->nixlf || blkaddr < 0) 1560 return NIX_AF_ERR_AF_LF_INVALID; 1561 1562 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1563 if (!nix_hw) 1564 return -EINVAL; 1565 1566 mutex_lock(&rvu->rsrc_lock); 1567 1568 /* Check if request is valid as per HW capabilities 1569 * and can be accomodated. 
1570 */ 1571 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1572 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1573 if (rc) 1574 goto err; 1575 } 1576 1577 /* Allocate requested Tx scheduler queues */ 1578 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1579 txsch = &nix_hw->txsch[lvl]; 1580 pfvf_map = txsch->pfvf_map; 1581 1582 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1583 continue; 1584 1585 rsp->schq[lvl] = req->schq[lvl]; 1586 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1587 1588 link = nix_get_tx_link(rvu, pcifunc); 1589 1590 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1591 start = link; 1592 end = link; 1593 } else if (hw->cap.nix_fixed_txschq_mapping) { 1594 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1595 } else { 1596 start = 0; 1597 end = txsch->schq.max; 1598 } 1599 1600 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1601 1602 /* Reset queue config */ 1603 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1604 schq = rsp->schq_contig_list[lvl][idx]; 1605 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1606 NIX_TXSCHQ_CFG_DONE)) 1607 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1608 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1609 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1610 } 1611 1612 for (idx = 0; idx < req->schq[lvl]; idx++) { 1613 schq = rsp->schq_list[lvl][idx]; 1614 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1615 NIX_TXSCHQ_CFG_DONE)) 1616 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1617 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1618 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1619 } 1620 } 1621 1622 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1623 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1624 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1625 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1626 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1627 goto exit; 1628 err: 1629 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1630 exit: 1631 mutex_unlock(&rvu->rsrc_lock); 1632 return rc; 1633 } 1634 1635 static void nix_smq_flush(struct rvu *rvu, int blkaddr, 1636 int smq, u16 pcifunc, int nixlf) 1637 { 1638 int pf = rvu_get_pf(pcifunc); 1639 u8 cgx_id = 0, lmac_id = 0; 1640 int err, restore_tx_en = 0; 1641 u64 cfg; 1642 1643 /* enable cgx tx if disabled */ 1644 if (is_pf_cgxmapped(rvu, pf)) { 1645 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1646 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1647 lmac_id, true); 1648 } 1649 1650 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 1651 /* Do SMQ flush and set enqueue xoff */ 1652 cfg |= BIT_ULL(50) | BIT_ULL(49); 1653 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 1654 1655 /* Disable backpressure from physical link, 1656 * otherwise SMQ flush may stall. 
1657 */ 1658 rvu_cgx_enadis_rx_bp(rvu, pf, false); 1659 1660 /* Wait for flush to complete */ 1661 err = rvu_poll_reg(rvu, blkaddr, 1662 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 1663 if (err) 1664 dev_err(rvu->dev, 1665 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); 1666 1667 rvu_cgx_enadis_rx_bp(rvu, pf, true); 1668 /* restore cgx tx state */ 1669 if (restore_tx_en) 1670 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 1671 } 1672 1673 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 1674 { 1675 int blkaddr, nixlf, lvl, schq, err; 1676 struct rvu_hwinfo *hw = rvu->hw; 1677 struct nix_txsch *txsch; 1678 struct nix_hw *nix_hw; 1679 1680 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1681 if (blkaddr < 0) 1682 return NIX_AF_ERR_AF_LF_INVALID; 1683 1684 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1685 if (!nix_hw) 1686 return -EINVAL; 1687 1688 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1689 if (nixlf < 0) 1690 return NIX_AF_ERR_AF_LF_INVALID; 1691 1692 /* Disable TL2/3 queue links before SMQ flush*/ 1693 mutex_lock(&rvu->rsrc_lock); 1694 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1695 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) 1696 continue; 1697 1698 txsch = &nix_hw->txsch[lvl]; 1699 for (schq = 0; schq < txsch->schq.max; schq++) { 1700 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1701 continue; 1702 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1703 } 1704 } 1705 1706 /* Flush SMQs */ 1707 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 1708 for (schq = 0; schq < txsch->schq.max; schq++) { 1709 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1710 continue; 1711 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1712 } 1713 1714 /* Now free scheduler queues to free pool */ 1715 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1716 /* TLs above aggregation level are shared across all PF 1717 * and it's VFs, hence skip freeing them. 
1718 */ 1719 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1720 continue; 1721 1722 txsch = &nix_hw->txsch[lvl]; 1723 for (schq = 0; schq < txsch->schq.max; schq++) { 1724 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1725 continue; 1726 rvu_free_rsrc(&txsch->schq, schq); 1727 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1728 } 1729 } 1730 mutex_unlock(&rvu->rsrc_lock); 1731 1732 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 1733 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 1734 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 1735 if (err) 1736 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 1737 1738 return 0; 1739 } 1740 1741 static int nix_txschq_free_one(struct rvu *rvu, 1742 struct nix_txsch_free_req *req) 1743 { 1744 struct rvu_hwinfo *hw = rvu->hw; 1745 u16 pcifunc = req->hdr.pcifunc; 1746 int lvl, schq, nixlf, blkaddr; 1747 struct nix_txsch *txsch; 1748 struct nix_hw *nix_hw; 1749 u32 *pfvf_map; 1750 1751 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1752 if (blkaddr < 0) 1753 return NIX_AF_ERR_AF_LF_INVALID; 1754 1755 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1756 if (!nix_hw) 1757 return -EINVAL; 1758 1759 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1760 if (nixlf < 0) 1761 return NIX_AF_ERR_AF_LF_INVALID; 1762 1763 lvl = req->schq_lvl; 1764 schq = req->schq; 1765 txsch = &nix_hw->txsch[lvl]; 1766 1767 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 1768 return 0; 1769 1770 pfvf_map = txsch->pfvf_map; 1771 mutex_lock(&rvu->rsrc_lock); 1772 1773 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 1774 mutex_unlock(&rvu->rsrc_lock); 1775 goto err; 1776 } 1777 1778 /* Flush if it is a SMQ. Onus of disabling 1779 * TL2/3 queue links before SMQ flush is on user 1780 */ 1781 if (lvl == NIX_TXSCH_LVL_SMQ) 1782 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1783 1784 /* Free the resource */ 1785 rvu_free_rsrc(&txsch->schq, schq); 1786 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1787 mutex_unlock(&rvu->rsrc_lock); 1788 return 0; 1789 err: 1790 return NIX_AF_ERR_TLX_INVALID; 1791 } 1792 1793 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 1794 struct nix_txsch_free_req *req, 1795 struct msg_rsp *rsp) 1796 { 1797 if (req->flags & TXSCHQ_FREE_ALL) 1798 return nix_txschq_free(rvu, req->hdr.pcifunc); 1799 else 1800 return nix_txschq_free_one(rvu, req); 1801 } 1802 1803 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 1804 int lvl, u64 reg, u64 regval) 1805 { 1806 u64 regbase = reg & 0xFFFF; 1807 u16 schq, parent; 1808 1809 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 1810 return false; 1811 1812 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1813 /* Check if this schq belongs to this PF/VF or not */ 1814 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 1815 return false; 1816 1817 parent = (regval >> 16) & 0x1FF; 1818 /* Validate MDQ's TL4 parent */ 1819 if (regbase == NIX_AF_MDQX_PARENT(0) && 1820 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 1821 return false; 1822 1823 /* Validate TL4's TL3 parent */ 1824 if (regbase == NIX_AF_TL4X_PARENT(0) && 1825 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 1826 return false; 1827 1828 /* Validate TL3's TL2 parent */ 1829 if (regbase == NIX_AF_TL3X_PARENT(0) && 1830 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 1831 return false; 1832 1833 /* Validate TL2's TL1 parent */ 1834 if (regbase == NIX_AF_TL2X_PARENT(0) && 
1835 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 1836 return false; 1837 1838 return true; 1839 } 1840 1841 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 1842 { 1843 u64 regbase; 1844 1845 if (hw->cap.nix_shaping) 1846 return true; 1847 1848 /* If shaping and coloring is not supported, then 1849 * *_CIR and *_PIR registers should not be configured. 1850 */ 1851 regbase = reg & 0xFFFF; 1852 1853 switch (lvl) { 1854 case NIX_TXSCH_LVL_TL1: 1855 if (regbase == NIX_AF_TL1X_CIR(0)) 1856 return false; 1857 break; 1858 case NIX_TXSCH_LVL_TL2: 1859 if (regbase == NIX_AF_TL2X_CIR(0) || 1860 regbase == NIX_AF_TL2X_PIR(0)) 1861 return false; 1862 break; 1863 case NIX_TXSCH_LVL_TL3: 1864 if (regbase == NIX_AF_TL3X_CIR(0) || 1865 regbase == NIX_AF_TL3X_PIR(0)) 1866 return false; 1867 break; 1868 case NIX_TXSCH_LVL_TL4: 1869 if (regbase == NIX_AF_TL4X_CIR(0) || 1870 regbase == NIX_AF_TL4X_PIR(0)) 1871 return false; 1872 break; 1873 } 1874 return true; 1875 } 1876 1877 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 1878 u16 pcifunc, int blkaddr) 1879 { 1880 u32 *pfvf_map; 1881 int schq; 1882 1883 schq = nix_get_tx_link(rvu, pcifunc); 1884 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 1885 /* Skip if PF has already done the config */ 1886 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 1887 return; 1888 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 1889 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 1890 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 1891 TXSCH_TL1_DFLT_RR_QTM); 1892 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 1893 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 1894 } 1895 1896 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 1897 struct nix_txschq_config *req, 1898 struct msg_rsp *rsp) 1899 { 1900 struct rvu_hwinfo *hw = rvu->hw; 1901 u16 pcifunc = req->hdr.pcifunc; 1902 u64 reg, regval, schq_regbase; 1903 struct nix_txsch *txsch; 1904 struct nix_hw *nix_hw; 1905 int blkaddr, idx, err; 1906 int nixlf, schq; 1907 u32 *pfvf_map; 1908 1909 if (req->lvl >= NIX_TXSCH_LVL_CNT || 1910 req->num_regs > MAX_REGS_PER_MBOX_MSG) 1911 return NIX_AF_INVAL_TXSCHQ_CFG; 1912 1913 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1914 if (err) 1915 return err; 1916 1917 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1918 if (!nix_hw) 1919 return -EINVAL; 1920 1921 txsch = &nix_hw->txsch[req->lvl]; 1922 pfvf_map = txsch->pfvf_map; 1923 1924 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 1925 pcifunc & RVU_PFVF_FUNC_MASK) { 1926 mutex_lock(&rvu->rsrc_lock); 1927 if (req->lvl == NIX_TXSCH_LVL_TL1) 1928 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 1929 mutex_unlock(&rvu->rsrc_lock); 1930 return 0; 1931 } 1932 1933 for (idx = 0; idx < req->num_regs; idx++) { 1934 reg = req->reg[idx]; 1935 regval = req->regval[idx]; 1936 schq_regbase = reg & 0xFFFF; 1937 1938 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 1939 txsch->lvl, reg, regval)) 1940 return NIX_AF_INVAL_TXSCHQ_CFG; 1941 1942 /* Check if shaping and coloring is supported */ 1943 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 1944 continue; 1945 1946 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 1947 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 1948 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 1949 pcifunc, 0); 1950 regval &= ~(0x7FULL << 24); 1951 regval |= ((u64)nixlf << 24); 1952 } 1953 1954 /* Clear 'BP_ENA' config, if it's not allowed */ 1955 if (!hw->cap.nix_tx_link_bp) { 1956 if (schq_regbase == 
NIX_AF_TL4X_SDP_LINK_CFG(0) || 1957 (schq_regbase & 0xFF00) == 1958 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 1959 regval &= ~BIT_ULL(13); 1960 } 1961 1962 /* Mark config as done for TL1 by PF */ 1963 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 1964 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 1965 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1966 mutex_lock(&rvu->rsrc_lock); 1967 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 1968 NIX_TXSCHQ_CFG_DONE); 1969 mutex_unlock(&rvu->rsrc_lock); 1970 } 1971 1972 /* SMQ flush is special hence split register writes such 1973 * that flush first and write rest of the bits later. 1974 */ 1975 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 1976 (regval & BIT_ULL(49))) { 1977 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1978 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1979 regval &= ~BIT_ULL(49); 1980 } 1981 rvu_write64(rvu, blkaddr, reg, regval); 1982 } 1983 1984 return 0; 1985 } 1986 1987 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 1988 struct nix_vtag_config *req) 1989 { 1990 u64 regval = req->vtag_size; 1991 1992 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 1993 req->vtag_size > VTAGSIZE_T8) 1994 return -EINVAL; 1995 1996 /* RX VTAG Type 7 reserved for vf vlan */ 1997 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 1998 return NIX_AF_ERR_RX_VTAG_INUSE; 1999 2000 if (req->rx.capture_vtag) 2001 regval |= BIT_ULL(5); 2002 if (req->rx.strip_vtag) 2003 regval |= BIT_ULL(4); 2004 2005 rvu_write64(rvu, blkaddr, 2006 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2007 return 0; 2008 } 2009 2010 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2011 u16 pcifunc, int index) 2012 { 2013 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2014 struct nix_txvlan *vlan = &nix_hw->txvlan; 2015 2016 if (vlan->entry2pfvf_map[index] != pcifunc) 2017 return NIX_AF_ERR_PARAM; 2018 2019 rvu_write64(rvu, blkaddr, 2020 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2021 rvu_write64(rvu, blkaddr, 2022 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2023 2024 vlan->entry2pfvf_map[index] = 0; 2025 rvu_free_rsrc(&vlan->rsrc, index); 2026 2027 return 0; 2028 } 2029 2030 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2031 { 2032 struct nix_txvlan *vlan; 2033 struct nix_hw *nix_hw; 2034 int index, blkaddr; 2035 2036 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2037 if (blkaddr < 0) 2038 return; 2039 2040 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2041 vlan = &nix_hw->txvlan; 2042 2043 mutex_lock(&vlan->rsrc_lock); 2044 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2045 for (index = 0; index < vlan->rsrc.max; index++) { 2046 if (vlan->entry2pfvf_map[index] == pcifunc) 2047 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2048 } 2049 mutex_unlock(&vlan->rsrc_lock); 2050 } 2051 2052 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2053 u64 vtag, u8 size) 2054 { 2055 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2056 struct nix_txvlan *vlan = &nix_hw->txvlan; 2057 u64 regval; 2058 int index; 2059 2060 mutex_lock(&vlan->rsrc_lock); 2061 2062 index = rvu_alloc_rsrc(&vlan->rsrc); 2063 if (index < 0) { 2064 mutex_unlock(&vlan->rsrc_lock); 2065 return index; 2066 } 2067 2068 mutex_unlock(&vlan->rsrc_lock); 2069 2070 regval = size ? 
vtag : vtag << 32; 2071 2072 rvu_write64(rvu, blkaddr, 2073 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2074 rvu_write64(rvu, blkaddr, 2075 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2076 2077 return index; 2078 } 2079 2080 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2081 struct nix_vtag_config *req) 2082 { 2083 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2084 struct nix_txvlan *vlan = &nix_hw->txvlan; 2085 u16 pcifunc = req->hdr.pcifunc; 2086 int idx0 = req->tx.vtag0_idx; 2087 int idx1 = req->tx.vtag1_idx; 2088 int err = 0; 2089 2090 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2091 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2092 vlan->entry2pfvf_map[idx1] != pcifunc) 2093 return NIX_AF_ERR_PARAM; 2094 2095 mutex_lock(&vlan->rsrc_lock); 2096 2097 if (req->tx.free_vtag0) { 2098 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2099 if (err) 2100 goto exit; 2101 } 2102 2103 if (req->tx.free_vtag1) 2104 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2105 2106 exit: 2107 mutex_unlock(&vlan->rsrc_lock); 2108 return err; 2109 } 2110 2111 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2112 struct nix_vtag_config *req, 2113 struct nix_vtag_config_rsp *rsp) 2114 { 2115 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2116 struct nix_txvlan *vlan = &nix_hw->txvlan; 2117 u16 pcifunc = req->hdr.pcifunc; 2118 2119 if (req->tx.cfg_vtag0) { 2120 rsp->vtag0_idx = 2121 nix_tx_vtag_alloc(rvu, blkaddr, 2122 req->tx.vtag0, req->vtag_size); 2123 2124 if (rsp->vtag0_idx < 0) 2125 return NIX_AF_ERR_TX_VTAG_NOSPC; 2126 2127 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2128 } 2129 2130 if (req->tx.cfg_vtag1) { 2131 rsp->vtag1_idx = 2132 nix_tx_vtag_alloc(rvu, blkaddr, 2133 req->tx.vtag1, req->vtag_size); 2134 2135 if (rsp->vtag1_idx < 0) 2136 goto err_free; 2137 2138 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2139 } 2140 2141 return 0; 2142 2143 err_free: 2144 if (req->tx.cfg_vtag0) 2145 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2146 2147 return NIX_AF_ERR_TX_VTAG_NOSPC; 2148 } 2149 2150 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2151 struct nix_vtag_config *req, 2152 struct nix_vtag_config_rsp *rsp) 2153 { 2154 u16 pcifunc = req->hdr.pcifunc; 2155 int blkaddr, nixlf, err; 2156 2157 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2158 if (err) 2159 return err; 2160 2161 if (req->cfg_type) { 2162 /* rx vtag configuration */ 2163 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2164 if (err) 2165 return NIX_AF_ERR_PARAM; 2166 } else { 2167 /* tx vtag configuration */ 2168 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2169 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2170 return NIX_AF_ERR_PARAM; 2171 2172 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2173 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2174 2175 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2176 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2177 } 2178 2179 return 0; 2180 } 2181 2182 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2183 int mce, u8 op, u16 pcifunc, int next, bool eol) 2184 { 2185 struct nix_aq_enq_req aq_req; 2186 int err; 2187 2188 aq_req.hdr.pcifunc = 0; 2189 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2190 aq_req.op = op; 2191 aq_req.qidx = mce; 2192 2193 /* Forward bcast pkts to RQ0, RSS not needed */ 2194 aq_req.mce.op = 0; 2195 aq_req.mce.index = 0; 2196 aq_req.mce.eol = eol; 2197 aq_req.mce.pf_func = pcifunc; 2198 aq_req.mce.next = next; 2199 2200 /* All fields valid */ 2201 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2202 2203 err = 
rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2204 if (err) { 2205 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2206 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2207 return err; 2208 } 2209 return 0; 2210 } 2211 2212 static int nix_update_mce_list(struct nix_mce_list *mce_list, 2213 u16 pcifunc, bool add) 2214 { 2215 struct mce *mce, *tail = NULL; 2216 bool delete = false; 2217 2218 /* Scan through the current list */ 2219 hlist_for_each_entry(mce, &mce_list->head, node) { 2220 /* If already exists, then delete */ 2221 if (mce->pcifunc == pcifunc && !add) { 2222 delete = true; 2223 break; 2224 } 2225 tail = mce; 2226 } 2227 2228 if (delete) { 2229 hlist_del(&mce->node); 2230 kfree(mce); 2231 mce_list->count--; 2232 return 0; 2233 } 2234 2235 if (!add) 2236 return 0; 2237 2238 /* Add a new one to the list, at the tail */ 2239 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2240 if (!mce) 2241 return -ENOMEM; 2242 mce->pcifunc = pcifunc; 2243 if (!tail) 2244 hlist_add_head(&mce->node, &mce_list->head); 2245 else 2246 hlist_add_behind(&mce->node, &tail->node); 2247 mce_list->count++; 2248 return 0; 2249 } 2250 2251 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) 2252 { 2253 int err = 0, idx, next_idx, last_idx; 2254 struct nix_mce_list *mce_list; 2255 struct nix_mcast *mcast; 2256 struct nix_hw *nix_hw; 2257 struct rvu_pfvf *pfvf; 2258 struct mce *mce; 2259 int blkaddr; 2260 2261 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ 2262 if (is_afvf(pcifunc)) 2263 return 0; 2264 2265 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2266 if (blkaddr < 0) 2267 return 0; 2268 2269 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2270 if (!nix_hw) 2271 return 0; 2272 2273 mcast = &nix_hw->mcast; 2274 2275 /* Get this PF/VF func's MCE index */ 2276 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2277 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2278 2279 mce_list = &pfvf->bcast_mce_list; 2280 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { 2281 dev_err(rvu->dev, 2282 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2283 __func__, idx, mce_list->max, 2284 pcifunc >> RVU_PFVF_PF_SHIFT); 2285 return -EINVAL; 2286 } 2287 2288 mutex_lock(&mcast->mce_lock); 2289 2290 err = nix_update_mce_list(mce_list, pcifunc, add); 2291 if (err) 2292 goto end; 2293 2294 /* Disable MCAM entry in NPC */ 2295 if (!mce_list->count) { 2296 rvu_npc_enable_bcast_entry(rvu, pcifunc, false); 2297 goto end; 2298 } 2299 2300 /* Dump the updated list to HW */ 2301 idx = pfvf->bcast_mce_idx; 2302 last_idx = idx + mce_list->count - 1; 2303 hlist_for_each_entry(mce, &mce_list->head, node) { 2304 if (idx > last_idx) 2305 break; 2306 2307 next_idx = idx + 1; 2308 /* EOL should be set in last MCE */ 2309 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2310 mce->pcifunc, next_idx, 2311 (next_idx > last_idx) ? 
true : false); 2312 if (err) 2313 goto end; 2314 idx++; 2315 } 2316 2317 end: 2318 mutex_unlock(&mcast->mce_lock); 2319 return err; 2320 } 2321 2322 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2323 { 2324 struct nix_mcast *mcast = &nix_hw->mcast; 2325 int err, pf, numvfs, idx; 2326 struct rvu_pfvf *pfvf; 2327 u16 pcifunc; 2328 u64 cfg; 2329 2330 /* Skip PF0 (i.e. AF) */ 2331 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2332 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2333 /* If PF is not enabled, nothing to do */ 2334 if (!((cfg >> 20) & 0x01)) 2335 continue; 2336 /* Get numVFs attached to this PF */ 2337 numvfs = (cfg >> 12) & 0xFF; 2338 2339 pfvf = &rvu->pf[pf]; 2340 2341 /* Is this NIX0/1 block mapped to this PF? */ 2342 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2343 continue; 2344 2345 /* Save the start MCE */ 2346 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2347 2348 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2349 2350 for (idx = 0; idx < (numvfs + 1); idx++) { 2351 /* idx-0 is for PF, followed by VFs */ 2352 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2353 pcifunc |= idx; 2354 /* Add dummy entries now, so that we don't have to check 2355 * whether AQ_OP should be INIT/WRITE later on. 2356 * These will be updated when a NIXLF is attached to or 2357 * detached from these PF/VFs. 2358 */ 2359 err = nix_blk_setup_mce(rvu, nix_hw, 2360 pfvf->bcast_mce_idx + idx, 2361 NIX_AQ_INSTOP_INIT, 2362 pcifunc, 0, true); 2363 if (err) 2364 return err; 2365 } 2366 } 2367 return 0; 2368 } 2369 2370 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2371 { 2372 struct nix_mcast *mcast = &nix_hw->mcast; 2373 struct rvu_hwinfo *hw = rvu->hw; 2374 int err, size; 2375 2376 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2377 size = (1ULL << size); 2378 2379 /* Alloc memory for multicast/mirror replication entries */ 2380 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2381 (256UL << MC_TBL_SIZE), size); 2382 if (err) 2383 return -ENOMEM; 2384 2385 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2386 (u64)mcast->mce_ctx->iova); 2387 2388 /* Set max list length equal to max no of VFs per PF + PF itself */ 2389 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2390 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2391 2392 /* Alloc memory for multicast replication buffers */ 2393 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2394 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2395 (8UL << MC_BUF_CNT), size); 2396 if (err) 2397 return -ENOMEM; 2398 2399 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2400 (u64)mcast->mcast_buf->iova); 2401 2402 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2403 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2404 2405 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2406 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2407 BIT_ULL(20) | MC_BUF_CNT); 2408 2409 mutex_init(&mcast->mce_lock); 2410 2411 return nix_setup_bcast_tables(rvu, nix_hw); 2412 } 2413 2414 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 2415 { 2416 struct nix_txvlan *vlan = &nix_hw->txvlan; 2417 int err; 2418 2419 /* Allocate resource bitmap for tx vtag def registers */ 2420 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 2421 err = rvu_alloc_bitmap(&vlan->rsrc); 2422 if (err) 2423 return -ENOMEM; 2424 2425 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 2426 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 2427 sizeof(u16),
GFP_KERNEL); 2428 if (!vlan->entry2pfvf_map) 2429 goto free_mem; 2430 2431 mutex_init(&vlan->rsrc_lock); 2432 return 0; 2433 2434 free_mem: 2435 kfree(vlan->rsrc.bmap); 2436 return -ENOMEM; 2437 } 2438 2439 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2440 { 2441 struct nix_txsch *txsch; 2442 int err, lvl, schq; 2443 u64 cfg, reg; 2444 2445 /* Get the scheduler queue count of each type and alloc a 2446 * bitmap for each, for alloc/free/attach operations. 2447 */ 2448 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2449 txsch = &nix_hw->txsch[lvl]; 2450 txsch->lvl = lvl; 2451 switch (lvl) { 2452 case NIX_TXSCH_LVL_SMQ: 2453 reg = NIX_AF_MDQ_CONST; 2454 break; 2455 case NIX_TXSCH_LVL_TL4: 2456 reg = NIX_AF_TL4_CONST; 2457 break; 2458 case NIX_TXSCH_LVL_TL3: 2459 reg = NIX_AF_TL3_CONST; 2460 break; 2461 case NIX_TXSCH_LVL_TL2: 2462 reg = NIX_AF_TL2_CONST; 2463 break; 2464 case NIX_TXSCH_LVL_TL1: 2465 reg = NIX_AF_TL1_CONST; 2466 break; 2467 } 2468 cfg = rvu_read64(rvu, blkaddr, reg); 2469 txsch->schq.max = cfg & 0xFFFF; 2470 err = rvu_alloc_bitmap(&txsch->schq); 2471 if (err) 2472 return err; 2473 2474 /* Allocate memory for scheduler queue to 2475 * PF/VF pcifunc mapping info. 2476 */ 2477 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 2478 sizeof(u32), GFP_KERNEL); 2479 if (!txsch->pfvf_map) 2480 return -ENOMEM; 2481 for (schq = 0; schq < txsch->schq.max; schq++) 2482 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2483 } 2484 return 0; 2485 } 2486 2487 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 2488 int blkaddr, u32 cfg) 2489 { 2490 int fmt_idx; 2491 2492 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 2493 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 2494 return fmt_idx; 2495 } 2496 if (fmt_idx >= nix_hw->mark_format.total) 2497 return -ERANGE; 2498 2499 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 2500 nix_hw->mark_format.cfg[fmt_idx] = cfg; 2501 nix_hw->mark_format.in_use++; 2502 return fmt_idx; 2503 } 2504 2505 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 2506 int blkaddr) 2507 { 2508 u64 cfgs[] = { 2509 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 2510 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 2511 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 2512 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 2513 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 2514 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 2515 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 2516 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 2517 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 2518 }; 2519 int i, rc; 2520 u64 total; 2521 2522 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 2523 nix_hw->mark_format.total = (u8)total; 2524 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 2525 GFP_KERNEL); 2526 if (!nix_hw->mark_format.cfg) 2527 return -ENOMEM; 2528 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 2529 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 2530 if (rc < 0) 2531 dev_err(rvu->dev, "Err %d in setup of mark format %d\n", 2532 rc, i); 2533 } 2534 2535 return 0; 2536 } 2537 2538 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 2539 struct msg_rsp *rsp) 2540 { 2541 u16 pcifunc = req->hdr.pcifunc; 2542 int i, nixlf, blkaddr, err; 2543 u64 stats; 2544 2545 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2546 if (err) 2547 return err; 2548 2549 /* Get stats count supported by HW */ 2550 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
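/* Added descriptive note: NIX_AF_CONST1 advertises the per-LF stat
 * register counts; the reset loops below read the LF TX stat count
 * from bits <31:24> and the LF RX stat count from bits <39:32> of
 * this value while clearing each NIX_AF_LFX_TX_STATX() and
 * NIX_AF_LFX_RX_STATX() register in turn.
 */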
2551 2552 /* Reset tx stats */ 2553 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 2554 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 2555 2556 /* Reset rx stats */ 2557 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 2558 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 2559 2560 return 0; 2561 } 2562 2563 /* Returns the ALG index to be set into NPC_RX_ACTION */ 2564 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 2565 { 2566 int i; 2567 2568 /* Scan over existing algo entries to find a match */ 2569 for (i = 0; i < nix_hw->flowkey.in_use; i++) 2570 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 2571 return i; 2572 2573 return -ERANGE; 2574 } 2575 2576 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 2577 { 2578 int idx, nr_field, key_off, field_marker, keyoff_marker; 2579 int max_key_off, max_bit_pos, group_member; 2580 struct nix_rx_flowkey_alg *field; 2581 struct nix_rx_flowkey_alg tmp; 2582 u32 key_type, valid_key; 2583 2584 if (!alg) 2585 return -EINVAL; 2586 2587 #define FIELDS_PER_ALG 5 2588 #define MAX_KEY_OFF 40 2589 /* Clear all fields */ 2590 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 2591 2592 /* Each of the 32 possible flow key algorithm definitions should 2593 * fall into the above incremental config (except ALG0). Otherwise a 2594 * single NPC MCAM entry is not sufficient for supporting RSS. 2595 * 2596 * If a different definition or combination is needed then the NPC MCAM 2597 * has to be programmed to filter such pkts and its action should 2598 * point to this definition to calculate the flowtag or hash. 2599 * 2600 * The `for loop` goes over _all_ protocol fields and the following 2601 * variables depict the state machine's forward progress logic. 2602 * 2603 * keyoff_marker - Enabled when hash byte length needs to be accounted 2604 * in field->key_offset update. 2605 * field_marker - Enabled when a new field needs to be selected. 2606 * group_member - Enabled when protocol is part of a group.
2607 */ 2608 2609 keyoff_marker = 0; max_key_off = 0; group_member = 0; 2610 nr_field = 0; key_off = 0; field_marker = 1; 2611 field = &tmp; max_bit_pos = fls(flow_cfg); 2612 for (idx = 0; 2613 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 2614 key_off < MAX_KEY_OFF; idx++) { 2615 key_type = BIT(idx); 2616 valid_key = flow_cfg & key_type; 2617 /* Found a field marker, reset the field values */ 2618 if (field_marker) 2619 memset(&tmp, 0, sizeof(tmp)); 2620 2621 field_marker = true; 2622 keyoff_marker = true; 2623 switch (key_type) { 2624 case NIX_FLOW_KEY_TYPE_PORT: 2625 field->sel_chan = true; 2626 /* This should be set to 1 when SEL_CHAN is set */ 2627 field->bytesm1 = 1; 2628 break; 2629 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 2630 field->lid = NPC_LID_LC; 2631 field->hdr_offset = 9; /* offset */ 2632 field->bytesm1 = 0; /* 1 byte */ 2633 field->ltype_match = NPC_LT_LC_IP; 2634 field->ltype_mask = 0xF; 2635 break; 2636 case NIX_FLOW_KEY_TYPE_IPV4: 2637 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 2638 field->lid = NPC_LID_LC; 2639 field->ltype_match = NPC_LT_LC_IP; 2640 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 2641 field->lid = NPC_LID_LG; 2642 field->ltype_match = NPC_LT_LG_TU_IP; 2643 } 2644 field->hdr_offset = 12; /* SIP offset */ 2645 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 2646 field->ltype_mask = 0xF; /* Match only IPv4 */ 2647 keyoff_marker = false; 2648 break; 2649 case NIX_FLOW_KEY_TYPE_IPV6: 2650 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 2651 field->lid = NPC_LID_LC; 2652 field->ltype_match = NPC_LT_LC_IP6; 2653 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 2654 field->lid = NPC_LID_LG; 2655 field->ltype_match = NPC_LT_LG_TU_IP6; 2656 } 2657 field->hdr_offset = 8; /* SIP offset */ 2658 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 2659 field->ltype_mask = 0xF; /* Match only IPv6 */ 2660 break; 2661 case NIX_FLOW_KEY_TYPE_TCP: 2662 case NIX_FLOW_KEY_TYPE_UDP: 2663 case NIX_FLOW_KEY_TYPE_SCTP: 2664 case NIX_FLOW_KEY_TYPE_INNR_TCP: 2665 case NIX_FLOW_KEY_TYPE_INNR_UDP: 2666 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 2667 field->lid = NPC_LID_LD; 2668 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 2669 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 2670 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 2671 field->lid = NPC_LID_LH; 2672 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 2673 2674 /* The ltype enum values under NPC_LID_LD and NPC_LID_LH 2675 * are the same, so no need to change the ltype_match, 2676 * just change the lid for inner protocols. 2677 */ 2678 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 2679 (int)NPC_LT_LH_TU_TCP); 2680 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 2681 (int)NPC_LT_LH_TU_UDP); 2682 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 2683 (int)NPC_LT_LH_TU_SCTP); 2684 2685 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 2686 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 2687 valid_key) { 2688 field->ltype_match |= NPC_LT_LD_TCP; 2689 group_member = true; 2690 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 2691 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 2692 valid_key) { 2693 field->ltype_match |= NPC_LT_LD_UDP; 2694 group_member = true; 2695 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 2696 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 2697 valid_key) { 2698 field->ltype_match |= NPC_LT_LD_SCTP; 2699 group_member = true; 2700 } 2701 field->ltype_mask = ~field->ltype_match; 2702 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 2703 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 2704 /* Handle the case where any of the group items 2705 * is enabled but not the final one 2706 */ 2707 if (group_member) { 2708 valid_key = true;
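/* Added descriptive note: SCTP is the last member of the TCP/UDP/SCTP
 * group; since at least one member of the group was requested,
 * force the shared L4 port field to be committed at the end of this
 * loop iteration even if SCTP itself was not asked for.
 */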
2709 group_member = false; 2710 } 2711 } else { 2712 field_marker = false; 2713 keyoff_marker = false; 2714 } 2715 break; 2716 case NIX_FLOW_KEY_TYPE_NVGRE: 2717 field->lid = NPC_LID_LD; 2718 field->hdr_offset = 4; /* VSID offset */ 2719 field->bytesm1 = 2; 2720 field->ltype_match = NPC_LT_LD_NVGRE; 2721 field->ltype_mask = 0xF; 2722 break; 2723 case NIX_FLOW_KEY_TYPE_VXLAN: 2724 case NIX_FLOW_KEY_TYPE_GENEVE: 2725 field->lid = NPC_LID_LE; 2726 field->bytesm1 = 2; 2727 field->hdr_offset = 4; 2728 field->ltype_mask = 0xF; 2729 field_marker = false; 2730 keyoff_marker = false; 2731 2732 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 2733 field->ltype_match |= NPC_LT_LE_VXLAN; 2734 group_member = true; 2735 } 2736 2737 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 2738 field->ltype_match |= NPC_LT_LE_GENEVE; 2739 group_member = true; 2740 } 2741 2742 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 2743 if (group_member) { 2744 field->ltype_mask = ~field->ltype_match; 2745 field_marker = true; 2746 keyoff_marker = true; 2747 valid_key = true; 2748 group_member = false; 2749 } 2750 } 2751 break; 2752 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 2753 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 2754 field->lid = NPC_LID_LA; 2755 field->ltype_match = NPC_LT_LA_ETHER; 2756 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 2757 field->lid = NPC_LID_LF; 2758 field->ltype_match = NPC_LT_LF_TU_ETHER; 2759 } 2760 field->hdr_offset = 0; 2761 field->bytesm1 = 5; /* DMAC 6 Byte */ 2762 field->ltype_mask = 0xF; 2763 break; 2764 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 2765 field->lid = NPC_LID_LC; 2766 field->hdr_offset = 40; /* IPV6 hdr */ 2767 field->bytesm1 = 0; /* 1 Byte ext hdr */ 2768 field->ltype_match = NPC_LT_LC_IP6_EXT; 2769 field->ltype_mask = 0xF; 2770 break; 2771 case NIX_FLOW_KEY_TYPE_GTPU: 2772 field->lid = NPC_LID_LE; 2773 field->hdr_offset = 4; 2774 field->bytesm1 = 3; /* 4 bytes TID */ 2775 field->ltype_match = NPC_LT_LE_GTPU; 2776 field->ltype_mask = 0xF; 2777 break; 2778 case NIX_FLOW_KEY_TYPE_VLAN: 2779 field->lid = NPC_LID_LB; 2780 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 2781 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 2782 field->ltype_match = NPC_LT_LB_CTAG; 2783 field->ltype_mask = 0xF; 2784 field->fn_mask = 1; /* Mask out the first nibble */ 2785 break; 2786 } 2787 field->ena = 1; 2788 2789 /* Found a valid flow key type */ 2790 if (valid_key) { 2791 field->key_offset = key_off; 2792 memcpy(&alg[nr_field], field, sizeof(*field)); 2793 max_key_off = max(max_key_off, field->bytesm1 + 1); 2794 2795 /* Found a field marker, get the next field */ 2796 if (field_marker) 2797 nr_field++; 2798 } 2799 2800 /* Found a keyoff marker, update the new key_off */ 2801 if (keyoff_marker) { 2802 key_off += max_key_off; 2803 max_key_off = 0; 2804 } 2805 } 2806 /* Processed all the flow key types */ 2807 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 2808 return 0; 2809 else 2810 return NIX_AF_ERR_RSS_NOSPC_FIELD; 2811 } 2812 2813 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 2814 { 2815 u64 field[FIELDS_PER_ALG]; 2816 struct nix_hw *hw; 2817 int fid, rc; 2818 2819 hw = get_nix_hw(rvu->hw, blkaddr); 2820 if (!hw) 2821 return -EINVAL; 2822 2823 /* No room to add a new flow hash algorithm */ 2824 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 2825 return NIX_AF_ERR_RSS_NOSPC_ALGO; 2826 2827 /* Generate algo fields for the given flow_cfg */ 2828 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 2829 if (rc) 2830 return rc; 2831 2832 /*
Update ALGX_FIELDX register with generated fields */ 2833 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2834 rvu_write64(rvu, blkaddr, 2835 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 2836 fid), field[fid]); 2837 2838 /* Store the flow_cfg for further lookup */ 2839 rc = hw->flowkey.in_use; 2840 hw->flowkey.flowkey[rc] = flow_cfg; 2841 hw->flowkey.in_use++; 2842 2843 return rc; 2844 } 2845 2846 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 2847 struct nix_rss_flowkey_cfg *req, 2848 struct nix_rss_flowkey_cfg_rsp *rsp) 2849 { 2850 u16 pcifunc = req->hdr.pcifunc; 2851 int alg_idx, nixlf, blkaddr; 2852 struct nix_hw *nix_hw; 2853 int err; 2854 2855 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2856 if (err) 2857 return err; 2858 2859 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2860 if (!nix_hw) 2861 return -EINVAL; 2862 2863 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 2864 /* Failed to get an algo index from the existing list, reserve a new one */ 2865 if (alg_idx < 0) { 2866 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 2867 req->flowkey_cfg); 2868 if (alg_idx < 0) 2869 return alg_idx; 2870 } 2871 rsp->alg_idx = alg_idx; 2872 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 2873 alg_idx, req->mcam_index); 2874 return 0; 2875 } 2876 2877 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 2878 { 2879 u32 flowkey_cfg, minkey_cfg; 2880 int alg, fid, rc; 2881 2882 /* Disable all flow key algx fieldx */ 2883 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 2884 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 2885 rvu_write64(rvu, blkaddr, 2886 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 2887 0); 2888 } 2889 2890 /* IPv4/IPv6 SIP/DIPs */ 2891 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 2892 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2893 if (rc < 0) 2894 return rc; 2895 2896 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2897 minkey_cfg = flowkey_cfg; 2898 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 2899 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2900 if (rc < 0) 2901 return rc; 2902 2903 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2904 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 2905 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2906 if (rc < 0) 2907 return rc; 2908 2909 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 2910 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 2911 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2912 if (rc < 0) 2913 return rc; 2914 2915 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2916 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2917 NIX_FLOW_KEY_TYPE_UDP; 2918 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2919 if (rc < 0) 2920 return rc; 2921 2922 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2923 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2924 NIX_FLOW_KEY_TYPE_SCTP; 2925 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2926 if (rc < 0) 2927 return rc; 2928 2929 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2930 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 2931 NIX_FLOW_KEY_TYPE_SCTP; 2932 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2933 if (rc < 0) 2934 return rc; 2935 2936 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 2937 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 2938 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 2939 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 2940 if (rc < 0) 2941 return rc; 2942 2943 return 0; 2944 } 2945 2946 int
rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 2947 struct nix_set_mac_addr *req, 2948 struct msg_rsp *rsp) 2949 { 2950 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 2951 u16 pcifunc = req->hdr.pcifunc; 2952 int blkaddr, nixlf, err; 2953 struct rvu_pfvf *pfvf; 2954 2955 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2956 if (err) 2957 return err; 2958 2959 pfvf = rvu_get_pfvf(rvu, pcifunc); 2960 2961 /* VF can't overwrite admin(PF) changes */ 2962 if (from_vf && pfvf->pf_set_vf_cfg) 2963 return -EPERM; 2964 2965 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 2966 2967 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 2968 pfvf->rx_chan_base, req->mac_addr); 2969 2970 return 0; 2971 } 2972 2973 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 2974 struct msg_req *req, 2975 struct nix_get_mac_addr_rsp *rsp) 2976 { 2977 u16 pcifunc = req->hdr.pcifunc; 2978 struct rvu_pfvf *pfvf; 2979 2980 if (!is_nixlf_attached(rvu, pcifunc)) 2981 return NIX_AF_ERR_AF_LF_INVALID; 2982 2983 pfvf = rvu_get_pfvf(rvu, pcifunc); 2984 2985 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 2986 2987 return 0; 2988 } 2989 2990 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 2991 struct msg_rsp *rsp) 2992 { 2993 bool allmulti = false, disable_promisc = false; 2994 u16 pcifunc = req->hdr.pcifunc; 2995 int blkaddr, nixlf, err; 2996 struct rvu_pfvf *pfvf; 2997 2998 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2999 if (err) 3000 return err; 3001 3002 pfvf = rvu_get_pfvf(rvu, pcifunc); 3003 3004 if (req->mode & NIX_RX_MODE_PROMISC) 3005 allmulti = false; 3006 else if (req->mode & NIX_RX_MODE_ALLMULTI) 3007 allmulti = true; 3008 else 3009 disable_promisc = true; 3010 3011 if (disable_promisc) 3012 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); 3013 else 3014 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3015 pfvf->rx_chan_base, allmulti); 3016 return 0; 3017 } 3018 3019 static void nix_find_link_frs(struct rvu *rvu, 3020 struct nix_frs_cfg *req, u16 pcifunc) 3021 { 3022 int pf = rvu_get_pf(pcifunc); 3023 struct rvu_pfvf *pfvf; 3024 int maxlen, minlen; 3025 int numvfs, hwvf; 3026 int vf; 3027 3028 /* Update with requester's min/max lengths */ 3029 pfvf = rvu_get_pfvf(rvu, pcifunc); 3030 pfvf->maxlen = req->maxlen; 3031 if (req->update_minlen) 3032 pfvf->minlen = req->minlen; 3033 3034 maxlen = req->maxlen; 3035 minlen = req->update_minlen ? 
req->minlen : 0; 3036 3037 /* Get this PF's numVFs and starting hwvf */ 3038 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 3039 3040 /* For each VF, compare requested max/minlen */ 3041 for (vf = 0; vf < numvfs; vf++) { 3042 pfvf = &rvu->hwvf[hwvf + vf]; 3043 if (pfvf->maxlen > maxlen) 3044 maxlen = pfvf->maxlen; 3045 if (req->update_minlen && 3046 pfvf->minlen && pfvf->minlen < minlen) 3047 minlen = pfvf->minlen; 3048 } 3049 3050 /* Compare requested max/minlen with PF's max/minlen */ 3051 pfvf = &rvu->pf[pf]; 3052 if (pfvf->maxlen > maxlen) 3053 maxlen = pfvf->maxlen; 3054 if (req->update_minlen && 3055 pfvf->minlen && pfvf->minlen < minlen) 3056 minlen = pfvf->minlen; 3057 3058 /* Update the request with max/min PF's and it's VF's max/min */ 3059 req->maxlen = maxlen; 3060 if (req->update_minlen) 3061 req->minlen = minlen; 3062 } 3063 3064 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 3065 struct msg_rsp *rsp) 3066 { 3067 struct rvu_hwinfo *hw = rvu->hw; 3068 u16 pcifunc = req->hdr.pcifunc; 3069 int pf = rvu_get_pf(pcifunc); 3070 int blkaddr, schq, link = -1; 3071 struct nix_txsch *txsch; 3072 u64 cfg, lmac_fifo_len; 3073 struct nix_hw *nix_hw; 3074 u8 cgx = 0, lmac = 0; 3075 3076 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3077 if (blkaddr < 0) 3078 return NIX_AF_ERR_AF_LF_INVALID; 3079 3080 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3081 if (!nix_hw) 3082 return -EINVAL; 3083 3084 if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) 3085 return NIX_AF_ERR_FRS_INVALID; 3086 3087 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 3088 return NIX_AF_ERR_FRS_INVALID; 3089 3090 /* Check if requester wants to update SMQ's */ 3091 if (!req->update_smq) 3092 goto rx_frscfg; 3093 3094 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 3095 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 3096 mutex_lock(&rvu->rsrc_lock); 3097 for (schq = 0; schq < txsch->schq.max; schq++) { 3098 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 3099 continue; 3100 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 3101 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 3102 if (req->update_minlen) 3103 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 3104 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 3105 } 3106 mutex_unlock(&rvu->rsrc_lock); 3107 3108 rx_frscfg: 3109 /* Check if config is for SDP link */ 3110 if (req->sdp_link) { 3111 if (!hw->sdp_links) 3112 return NIX_AF_ERR_RX_LINK_INVALID; 3113 link = hw->cgx_links + hw->lbk_links; 3114 goto linkcfg; 3115 } 3116 3117 /* Check if the request is from CGX mapped RVU PF */ 3118 if (is_pf_cgxmapped(rvu, pf)) { 3119 /* Get CGX and LMAC to which this PF is mapped and find link */ 3120 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 3121 link = (cgx * hw->lmac_per_cgx) + lmac; 3122 } else if (pf == 0) { 3123 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 3124 link = hw->cgx_links; 3125 } 3126 3127 if (link < 0) 3128 return NIX_AF_ERR_RX_LINK_INVALID; 3129 3130 nix_find_link_frs(rvu, req, pcifunc); 3131 3132 linkcfg: 3133 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 3134 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 3135 if (req->update_minlen) 3136 cfg = (cfg & ~0xFFFFULL) | req->minlen; 3137 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 3138 3139 if (req->sdp_link || pf == 0) 3140 return 0; 3141 3142 /* Update transmit credits for CGX links */ 3143 lmac_fifo_len = 3144 CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, 
rvu)); 3145 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3146 cfg &= ~(0xFFFFFULL << 12); 3147 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; 3148 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 3149 return 0; 3150 } 3151 3152 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3153 struct msg_rsp *rsp) 3154 { 3155 int nixlf, blkaddr, err; 3156 u64 cfg; 3157 3158 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3159 if (err) 3160 return err; 3161 3162 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3163 /* Set the interface configuration */ 3164 if (req->len_verify & BIT(0)) 3165 cfg |= BIT_ULL(41); 3166 else 3167 cfg &= ~BIT_ULL(41); 3168 3169 if (req->len_verify & BIT(1)) 3170 cfg |= BIT_ULL(40); 3171 else 3172 cfg &= ~BIT_ULL(40); 3173 3174 if (req->csum_verify & BIT(0)) 3175 cfg |= BIT_ULL(37); 3176 else 3177 cfg &= ~BIT_ULL(37); 3178 3179 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3180 3181 return 0; 3182 } 3183 3184 static void nix_link_config(struct rvu *rvu, int blkaddr) 3185 { 3186 struct rvu_hwinfo *hw = rvu->hw; 3187 int cgx, lmac_cnt, slink, link; 3188 u64 tx_credits; 3189 3190 /* Set default min/max packet lengths allowed on NIX Rx links. 3191 * 3192 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3193 * as undersize and report them to SW as error pkts, hence 3194 * setting it to 40 bytes. 3195 */ 3196 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { 3197 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3198 NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3199 } 3200 3201 if (hw->sdp_links) { 3202 link = hw->cgx_links + hw->lbk_links; 3203 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3204 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3205 } 3206 3207 /* Set credits for Tx links assuming max packet length allowed. 3208 * This will be reconfigured based on MTU set for PF/VF. 
3209 */ 3210 for (cgx = 0; cgx < hw->cgx; cgx++) { 3211 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3212 tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; 3213 /* Enable credits and set credit pkt count to max allowed */ 3214 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3215 slink = cgx * hw->lmac_per_cgx; 3216 for (link = slink; link < (slink + lmac_cnt); link++) { 3217 rvu_write64(rvu, blkaddr, 3218 NIX_AF_TX_LINKX_NORM_CREDIT(link), 3219 tx_credits); 3220 } 3221 } 3222 3223 /* Set Tx credits for LBK link */ 3224 slink = hw->cgx_links; 3225 for (link = slink; link < (slink + hw->lbk_links); link++) { 3226 tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ 3227 /* Enable credits and set credit pkt count to max allowed */ 3228 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3229 rvu_write64(rvu, blkaddr, 3230 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 3231 } 3232 } 3233 3234 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 3235 { 3236 int idx, err; 3237 u64 status; 3238 3239 /* Start X2P bus calibration */ 3240 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3241 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 3242 /* Wait for calibration to complete */ 3243 err = rvu_poll_reg(rvu, blkaddr, 3244 NIX_AF_STATUS, BIT_ULL(10), false); 3245 if (err) { 3246 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 3247 return err; 3248 } 3249 3250 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 3251 /* Check if CGX devices are ready */ 3252 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 3253 /* Skip when cgx port is not available */ 3254 if (!rvu_cgx_pdata(idx, rvu) || 3255 (status & (BIT_ULL(16 + idx)))) 3256 continue; 3257 dev_err(rvu->dev, 3258 "CGX%d didn't respond to NIX X2P calibration\n", idx); 3259 err = -EBUSY; 3260 } 3261 3262 /* Check if LBK is ready */ 3263 if (!(status & BIT_ULL(19))) { 3264 dev_err(rvu->dev, 3265 "LBK didn't respond to NIX X2P calibration\n"); 3266 err = -EBUSY; 3267 } 3268 3269 /* Clear 'calibrate_x2p' bit */ 3270 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3271 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 3272 if (err || (status & 0x3FFULL)) 3273 dev_err(rvu->dev, 3274 "NIX X2P calibration failed, status 0x%llx\n", status); 3275 if (err) 3276 return err; 3277 return 0; 3278 } 3279 3280 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 3281 { 3282 u64 cfg; 3283 int err; 3284 3285 /* Set admin queue endianness */ 3286 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 3287 #ifdef __BIG_ENDIAN 3288 cfg |= BIT_ULL(8); 3289 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3290 #else 3291 cfg &= ~BIT_ULL(8); 3292 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3293 #endif 3294 3295 /* Do not bypass NDC cache */ 3296 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 3297 cfg &= ~0x3FFEULL; 3298 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 3299 /* Disable caching of SQB aka SQEs */ 3300 cfg |= 0x04ULL; 3301 #endif 3302 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 3303 3304 /* Result structure can be followed by RQ/SQ/CQ context at 3305 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 3306 * operation type. Alloc sufficient result memory for all operations. 
3307 */ 3308 err = rvu_aq_alloc(rvu, &block->aq, 3309 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 3310 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 3311 if (err) 3312 return err; 3313 3314 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 3315 rvu_write64(rvu, block->addr, 3316 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 3317 return 0; 3318 } 3319 3320 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 3321 { 3322 const struct npc_lt_def_cfg *ltdefs; 3323 struct rvu_hwinfo *hw = rvu->hw; 3324 int blkaddr = nix_hw->blkaddr; 3325 struct rvu_block *block; 3326 int err; 3327 u64 cfg; 3328 3329 block = &hw->block[blkaddr]; 3330 3331 if (is_rvu_96xx_B0(rvu)) { 3332 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 3333 * internal state when conditional clocks are turned off. 3334 * Hence enable them. 3335 */ 3336 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3337 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 3338 3339 /* Set chan/link to backpressure TL3 instead of TL2 */ 3340 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 3341 3342 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 3343 * This sticky mode is known to cause SQ stalls when multiple 3344 * SQs are mapped to same SMQ and transmitting pkts at a time. 3345 */ 3346 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 3347 cfg &= ~BIT_ULL(15); 3348 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 3349 } 3350 3351 ltdefs = rvu->kpu.lt_def; 3352 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 3353 err = nix_calibrate_x2p(rvu, blkaddr); 3354 if (err) 3355 return err; 3356 3357 /* Set num of links of each type */ 3358 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 3359 hw->cgx = (cfg >> 12) & 0xF; 3360 hw->lmac_per_cgx = (cfg >> 8) & 0xF; 3361 hw->cgx_links = hw->cgx * hw->lmac_per_cgx; 3362 hw->lbk_links = (cfg >> 24) & 0xF; 3363 hw->sdp_links = 1; 3364 3365 /* Initialize admin queue */ 3366 err = nix_aq_init(rvu, block); 3367 if (err) 3368 return err; 3369 3370 /* Restore CINT timer delay to HW reset values */ 3371 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 3372 3373 if (is_block_implemented(hw, blkaddr)) { 3374 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 3375 if (err) 3376 return err; 3377 3378 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 3379 if (err) 3380 return err; 3381 3382 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 3383 if (err) 3384 return err; 3385 3386 err = nix_setup_txvlan(rvu, nix_hw); 3387 if (err) 3388 return err; 3389 3390 /* Configure segmentation offload formats */ 3391 nix_setup_lso(rvu, nix_hw, blkaddr); 3392 3393 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 3394 * This helps HW protocol checker to identify headers 3395 * and validate length and checksums. 
3396 */ 3397 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 3398 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 3399 ltdefs->rx_ol2.ltype_mask); 3400 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 3401 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 3402 ltdefs->rx_oip4.ltype_mask); 3403 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 3404 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 3405 ltdefs->rx_iip4.ltype_mask); 3406 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 3407 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 3408 ltdefs->rx_oip6.ltype_mask); 3409 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 3410 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 3411 ltdefs->rx_iip6.ltype_mask); 3412 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 3413 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 3414 ltdefs->rx_otcp.ltype_mask); 3415 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 3416 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 3417 ltdefs->rx_itcp.ltype_mask); 3418 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 3419 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 3420 ltdefs->rx_oudp.ltype_mask); 3421 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 3422 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 3423 ltdefs->rx_iudp.ltype_mask); 3424 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 3425 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 3426 ltdefs->rx_osctp.ltype_mask); 3427 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 3428 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 3429 ltdefs->rx_isctp.ltype_mask); 3430 3431 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 3432 if (err) 3433 return err; 3434 3435 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 3436 nix_link_config(rvu, blkaddr); 3437 3438 /* Enable Channel backpressure */ 3439 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 3440 } 3441 return 0; 3442 } 3443 3444 int rvu_nix_init(struct rvu *rvu) 3445 { 3446 struct rvu_hwinfo *hw = rvu->hw; 3447 struct nix_hw *nix_hw; 3448 int blkaddr = 0, err; 3449 int i = 0; 3450 3451 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 3452 GFP_KERNEL); 3453 if (!hw->nix) 3454 return -ENOMEM; 3455 3456 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3457 while (blkaddr) { 3458 nix_hw = &hw->nix[i]; 3459 nix_hw->rvu = rvu; 3460 nix_hw->blkaddr = blkaddr; 3461 err = rvu_nix_block_init(rvu, nix_hw); 3462 if (err) 3463 return err; 3464 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3465 i++; 3466 } 3467 3468 return 0; 3469 } 3470 3471 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 3472 struct rvu_block *block) 3473 { 3474 struct nix_txsch *txsch; 3475 struct nix_mcast *mcast; 3476 struct nix_txvlan *vlan; 3477 struct nix_hw *nix_hw; 3478 int lvl; 3479 3480 rvu_aq_free(rvu, block->aq); 3481 3482 if (is_block_implemented(rvu->hw, blkaddr)) { 3483 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3484 if (!nix_hw) 3485 return; 3486 3487 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3488 txsch = &nix_hw->txsch[lvl]; 3489 kfree(txsch->schq.bmap); 3490 } 3491 3492 vlan = &nix_hw->txvlan; 3493 kfree(vlan->rsrc.bmap); 3494 mutex_destroy(&vlan->rsrc_lock); 3495 devm_kfree(rvu->dev, vlan->entry2pfvf_map); 3496 3497 mcast = &nix_hw->mcast; 3498 qmem_free(rvu->dev, mcast->mce_ctx); 3499 qmem_free(rvu->dev, mcast->mcast_buf); 3500 mutex_destroy(&mcast->mce_lock); 3501 } 3502 
} 3503 3504 void rvu_nix_freemem(struct rvu *rvu) 3505 { 3506 struct rvu_hwinfo *hw = rvu->hw; 3507 struct rvu_block *block; 3508 int blkaddr = 0; 3509 3510 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3511 while (blkaddr) { 3512 block = &hw->block[blkaddr]; 3513 rvu_nix_block_freemem(rvu, blkaddr, block); 3514 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3515 } 3516 } 3517 3518 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 3519 struct msg_rsp *rsp) 3520 { 3521 u16 pcifunc = req->hdr.pcifunc; 3522 int nixlf, err; 3523 3524 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3525 if (err) 3526 return err; 3527 3528 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 3529 3530 npc_mcam_enable_flows(rvu, pcifunc); 3531 3532 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 3533 } 3534 3535 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 3536 struct msg_rsp *rsp) 3537 { 3538 u16 pcifunc = req->hdr.pcifunc; 3539 int nixlf, err; 3540 3541 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3542 if (err) 3543 return err; 3544 3545 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 3546 3547 npc_mcam_disable_flows(rvu, pcifunc); 3548 3549 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 3550 } 3551 3552 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 3553 { 3554 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3555 struct hwctx_disable_req ctx_req; 3556 int err; 3557 3558 ctx_req.hdr.pcifunc = pcifunc; 3559 3560 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 3561 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 3562 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 3563 nix_interface_deinit(rvu, pcifunc, nixlf); 3564 nix_rx_sync(rvu, blkaddr); 3565 nix_txschq_free(rvu, pcifunc); 3566 3567 rvu_cgx_start_stop_io(rvu, pcifunc, false); 3568 3569 if (pfvf->sq_ctx) { 3570 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 3571 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3572 if (err) 3573 dev_err(rvu->dev, "SQ ctx disable failed\n"); 3574 } 3575 3576 if (pfvf->rq_ctx) { 3577 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 3578 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3579 if (err) 3580 dev_err(rvu->dev, "RQ ctx disable failed\n"); 3581 } 3582 3583 if (pfvf->cq_ctx) { 3584 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 3585 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3586 if (err) 3587 dev_err(rvu->dev, "CQ ctx disable failed\n"); 3588 } 3589 3590 nix_ctx_free(rvu, pfvf); 3591 } 3592 3593 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 3594 3595 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 3596 { 3597 struct rvu_hwinfo *hw = rvu->hw; 3598 struct rvu_block *block; 3599 int blkaddr; 3600 int nixlf; 3601 u64 cfg; 3602 3603 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3604 if (blkaddr < 0) 3605 return NIX_AF_ERR_AF_LF_INVALID; 3606 3607 block = &hw->block[blkaddr]; 3608 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 3609 if (nixlf < 0) 3610 return NIX_AF_ERR_AF_LF_INVALID; 3611 3612 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 3613 3614 if (enable) 3615 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 3616 else 3617 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 3618 3619 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 3620 3621 return 0; 3622 } 3623 3624 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 3625 struct msg_rsp *rsp) 3626 { 3627 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 3628 } 3629 3630 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, 
struct msg_req *req, 3631 struct msg_rsp *rsp) 3632 { 3633 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 3634 } 3635 3636 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 3637 struct nix_lso_format_cfg *req, 3638 struct nix_lso_format_cfg_rsp *rsp) 3639 { 3640 u16 pcifunc = req->hdr.pcifunc; 3641 struct nix_hw *nix_hw; 3642 struct rvu_pfvf *pfvf; 3643 int blkaddr, idx, f; 3644 u64 reg; 3645 3646 pfvf = rvu_get_pfvf(rvu, pcifunc); 3647 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3648 if (!pfvf->nixlf || blkaddr < 0) 3649 return NIX_AF_ERR_AF_LF_INVALID; 3650 3651 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3652 if (!nix_hw) 3653 return -EINVAL; 3654 3655 /* Find existing matching LSO format, if any */ 3656 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 3657 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 3658 reg = rvu_read64(rvu, blkaddr, 3659 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 3660 if (req->fields[f] != (reg & req->field_mask)) 3661 break; 3662 } 3663 3664 if (f == NIX_LSO_FIELD_MAX) 3665 break; 3666 } 3667 3668 if (idx < nix_hw->lso.in_use) { 3669 /* Match found */ 3670 rsp->lso_format_idx = idx; 3671 return 0; 3672 } 3673 3674 if (nix_hw->lso.in_use == nix_hw->lso.total) 3675 return NIX_AF_ERR_LSO_CFG_FAIL; 3676 3677 rsp->lso_format_idx = nix_hw->lso.in_use++; 3678 3679 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 3680 rvu_write64(rvu, blkaddr, 3681 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 3682 req->fields[f]); 3683 3684 return 0; 3685 } 3686 3687 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 3688 { 3689 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 3690 3691 /* overwrite vf mac address with default_mac */ 3692 if (from_vf) 3693 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 3694 } 3695