// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

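/*
 * Illustrative sketch (not part of the driver): nix_alloc_mce_list() is a
 * simple bump allocator over the shared MCE table, so consecutive calls
 * hand out back-to-back index ranges. Reserving a three-entry replication
 * list (a PF plus two of its VFs, counts assumed for the example) would
 * look like:
 */
#if 0
static u16 example_reserve_bcast_list(struct nix_mcast *mcast)
{
	/* Returns the base index; entries base..base+2 now belong to us */
	return nix_alloc_mce_list(mcast, 3);
}
#endif
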
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in-flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

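/*
 * The txsch->pfvf_map[] words used by is_valid_txschq() pack the owning
 * PF_FUNC and per-queue flags into one u32; the TXSCH_MAP_FUNC()/
 * TXSCH_MAP_FLAGS() helpers extract the two halves. A minimal ownership
 * check on top of them could look like this (illustrative only):
 */
#if 0
static bool example_schq_owned_by(struct nix_txsch *txsch, u16 schq,
				  u16 pcifunc)
{
	return TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc;
}
#endif
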
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pkind, pf, vf, lbkid;
	u8 cgx_id, lmac_id;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
					       lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternately for LBK interfaces. NIX0 should
		 * send packets on LBK link 1 channels and NIX1 should send
		 * on LBK link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, false);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}

	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);
	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

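/*
 * Worked example of the BPID layout computed by rvu_nix_get_bpid() below,
 * assuming 16 channels per LMAC (lmac_chan_cnt) and 4 LMACs per CGX
 * (hw->lmac_per_cgx):
 *
 *	cgx_id = 1, lmac_id = 2, chan_base = 0
 *	bpid = (1 * 4 * 16) + (2 * 16) + 0 = 96
 *
 * i.e. cgx(1)_lmac(2)_chan(0) maps to BPID 96, consistent with the
 * mapping table documented inside the function.
 */
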
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

	/* Enable backpressure only for CGX mapped PFs and LBK interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & 0xFF) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

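/*
 * The rsp->chan_bpid[] words built above carry the channel number shifted
 * left by 10 and the BPID in the low 10 bits. A hypothetical consumer on
 * the PF/VF side (sketch only, not a driver API, and assuming the mailbox
 * field is wide enough for both fields) would unpack them as:
 */
#if 0
static void example_unpack_chan_bpid(u32 chan_bpid, u16 *chan, u16 *bpid)
{
	*chan = (chan_bpid >> 10) & 0x7F;
	*bpid = chan_bpid & 0x3FF;
}
#endif
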
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

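/*
 * The LSO setup above writes struct nix_lso_format directly into a 64-bit
 * register via "*(u64 *)&field", which only works if the structure is
 * exactly one u64 wide. A compile-time guard like the following would
 * document that assumption (illustrative, not in the original source):
 */
#if 0
static_assert(sizeof(struct nix_lso_format) == sizeof(u64));
#endif
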
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
		    way_mask << 20);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

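/*
 * Worked example for nixlf_rss_ctx_init() above, assuming
 * MAX_RSS_INDIR_TBL_SIZE is 256: with rss_sz = 256 and rss_grps = 8,
 * num_indices = 2048, so the size field written to NIX_AF_LFX_RSS_CFG is
 * ilog2(2048 / 256) = 3 and hardware sees 256 << 3 = 2048 indirection
 * entries. Each group then gets offset rss_sz * grp (0, 256, 512, ...)
 * and size field ilog2(256) - 1 = 7.
 */
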
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry init */
	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			/* Track the SQ context's own ena bit */
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			/* Track the CQ context's own ena bit */
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

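/*
 * Minimal sketch of an AQ context write built on top of
 * rvu_nix_aq_enq_inst(): only the fields selected by the mask are
 * modified in the hardware context. This hypothetical helper mirrors
 * what nix_lf_hwctx_disable() below does for a single CQ:
 */
#if 0
static int example_disable_one_cq(struct rvu *rvu, u16 pcifunc, int qidx)
{
	struct nix_aq_enq_req req;

	memset(&req, 0, sizeof(req));
	req.hdr.pcifunc = pcifunc;
	req.ctype = NIX_AQ_CTYPE_CQ;
	req.op = NIX_AQ_INSTOP_WRITE;
	req.qidx = qidx;
	req.cq.ena = 0;		/* new value */
	req.cq_mask.ena = 1;	/* update only the ENA bit */
	return rvu_nix_aq_enq_inst(rvu, &req, NULL);
}
#endif
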
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

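/*
 * Note on the CONFIG_NDC_DIS_DYNAMIC_CACHING path above: every successful
 * INIT enqueue is chased with a second AQ instruction carrying
 * op = NIX_AQ_INSTOP_LOCK for the same ctype/qidx, which pins that
 * context in NDC instead of letting it be cached dynamically. MCE and
 * DYNO contexts are exempt, as the code shows.
 */
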
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = 0;
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}

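/*
 * Worked example of the NIX_AF_CONST3 decoding used above: the register
 * carries log2 of each HW context size in 4-bit nibbles. If the RQ nibble
 * ((ctx_cfg >> 4) & 0xF) reads 7 (value illustrative), then
 * hwctx_size = 1UL << 7 = 128 bytes per RQ context, and qmem_alloc()
 * reserves req->rq_cnt * 128 bytes for the LF's RQ contexts.
 */
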
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

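/*
 * Example of the mark format word assembled above (field values
 * illustrative): offset = 1, y_mask = 0xF, y_val = 0x2, r_mask = 0xF,
 * r_val = 0x3 gives
 *
 *	cfg = (1 << 16) | (0xF << 12) | (0x2 << 8) | (0xF << 4) | 0x3
 *	    = 0x1F2F3
 */
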
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL2)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

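/*
 * Worked example of the transmit link numbering returned by
 * nix_get_tx_link() above, assuming hw->cgx_links = 8, hw->lbk_links = 1
 * and hw->lmac_per_cgx = 4: a PF mapped to cgx1/lmac2 gets link
 * 1 * 4 + 2 = 6, an AF VF gets the first LBK link (8), and any other
 * function falls through to the SDP link (8 + 1 = 9).
 */
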
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

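/*
 * In the fixed txschq mapping case checked above, each function owns at
 * most one implicit queue per level:
 * schq = start + (pcifunc & RVU_PFVF_FUNC_MASK). E.g. if a CGX link's
 * range starts at 16, a function whose low FUNC bits equal 3 can only
 * ever be granted schq 16 + 3 = 19 (numbers illustrative).
 */
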
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Requesting both contig and non-contig queues doesn't make
		 * sense here; the contiguous request wins.
		 */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

static void nix_smq_flush(struct rvu *rvu, int blkaddr,
			  int smq, u16 pcifunc, int nixlf)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}

static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}

static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		mutex_unlock(&rvu->rsrc_lock);
		goto err;
	}

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ)
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	return NIX_AF_ERR_TLX_INVALID;
}

int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);
	else
		return nix_txschq_free_one(rvu, req);
}

static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}

static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	}
	return true;
}

static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
		    TXSCH_TL1_DFLT_RR_QTM);
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, hence split the register write so
		 * that the flush is triggered first and the rest of the
		 * bits are written later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}

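/*
 * Example of one reg/regval pair processed by the loop above (values
 * illustrative): a write to NIX_AF_SMQX_CFG(0) with bit 49 set first
 * triggers nix_smq_flush(), after which the remaining bits are written
 * with bit 49 cleared; independently, the NIXLF field in bits <30:24> is
 * always rewritten with the hardware NIXLF id, whatever slot value the
 * PF/VF supplied.
 */
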
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, hence split the register writes such
		 * that the flush happens first and the rest of the bits are
		 * written later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}

static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG type 7 is reserved for VF VLAN */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}

static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;

	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}

static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u64 regval;
	int index;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

	regval = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);

	return index;
}

static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
			     struct nix_vtag_config *req)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u16 pcifunc = req->hdr.pcifunc;
	int idx0 = req->tx.vtag0_idx;
	int idx1 = req->tx.vtag1_idx;
	int err = 0;

	if (req->tx.free_vtag0 && req->tx.free_vtag1)
		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
		    vlan->entry2pfvf_map[idx1] != pcifunc)
			return NIX_AF_ERR_PARAM;

	mutex_lock(&vlan->rsrc_lock);

	if (req->tx.free_vtag0) {
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
		if (err)
			goto exit;
	}

	if (req->tx.free_vtag1)
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);

exit:
	mutex_unlock(&vlan->rsrc_lock);
	return err;
}

static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
			   struct nix_vtag_config *req,
			   struct nix_vtag_config_rsp *rsp)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u16 pcifunc = req->hdr.pcifunc;

	if (req->tx.cfg_vtag0) {
		rsp->vtag0_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag0, req->vtag_size);

		if (rsp->vtag0_idx < 0)
			return NIX_AF_ERR_TX_VTAG_NOSPC;

		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
	}

	if (req->tx.cfg_vtag1) {
		rsp->vtag1_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag1, req->vtag_size);

		if (rsp->vtag1_idx < 0)
			goto err_free;

		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
	}

	return 0;

err_free:
	if (req->tx.cfg_vtag0)
		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);

	return NIX_AF_ERR_TX_VTAG_NOSPC;
}

int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct nix_vtag_config_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		/* rx vtag configuration */
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* tx vtag configuration */
		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
		    (req->tx.free_vtag0 || req->tx.free_vtag1))
			return NIX_AF_ERR_PARAM;

		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);

		if (req->tx.free_vtag0 || req->tx.free_vtag1)
			return nix_tx_vtag_decfg(rvu, blkaddr, req);
	}

	return 0;
}

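/* Illustrative sketch (hypothetical field values, not part of the
 * driver): a PF/VF requesting a default TX VTAG0 entry for, say,
 * VLAN 100 would fill the request roughly like:
 *
 *	struct nix_vtag_config req = { 0 };
 *
 *	req.cfg_type = 0;		// 0 => TX, non-zero => RX
 *	req.vtag_size = VTAGSIZE_T4;	// 4-byte tag
 *	req.tx.cfg_vtag0 = 1;
 *	req.tx.vtag0 = (0x8100ULL << 16) | 100;	// TPID + VID
 *
 * The response carries vtag0_idx, which the PF/VF later passes back
 * with tx.free_vtag0 set to release the entry.
 */
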
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Forward bcast pkts to RQ0, RSS not needed */
	aq_req.mce.op = 0;
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}

static int nix_update_mce_list(struct nix_mce_list *mce_list,
			       u16 pcifunc, bool add)
{
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Scan through the current list */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		/* If the entry already exists, then delete it */
		if (mce->pcifunc == pcifunc && !add) {
			delete = true;
			break;
		}
		tail = mce;
	}

	if (delete) {
		hlist_del(&mce->node);
		kfree(mce);
		mce_list->count--;
		return 0;
	}

	if (!add)
		return 0;

	/* Add a new one to the list, at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	if (!mce)
		return -ENOMEM;
	mce->pcifunc = pcifunc;
	if (!tail)
		hlist_add_head(&mce->node, &mce_list->head);
	else
		hlist_add_behind(&mce->node, &tail->node);
	mce_list->count++;
	return 0;
}

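/* Illustrative note (layout derived from the code above): the software
 * hlist mirrors a hardware chain of MCE contexts, where each entry
 * points at the next index and the last one carries EOL, e.g. for a
 * PF with two VFs subscribed to broadcast:
 *
 *	idx N   -> { pf_func = PF,  next = N + 1, eol = 0 }
 *	idx N+1 -> { pf_func = VF0, next = N + 2, eol = 0 }
 *	idx N+2 -> { pf_func = VF1, next = 0,     eol = 1 }
 */
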
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
	int err = 0, idx, next_idx, last_idx;
	struct nix_mce_list *mce_list;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	struct mce *mce;
	int blkaddr;

	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
	if (is_afvf(pcifunc))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return 0;

	mcast = &nix_hw->mcast;

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	mce_list = &pfvf->bcast_mce_list;
	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);
		return -EINVAL;
	}

	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list(mce_list, pcifunc, add);
	if (err)
		goto end;

	/* Disable MCAM entry in NPC */
	if (!mce_list->count) {
		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
		goto end;
	}

	/* Dump the updated list to HW */
	idx = pfvf->bcast_mce_idx;
	last_idx = idx + mce_list->count - 1;
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					next_idx > last_idx);
		if (err)
			goto end;
		idx++;
	}

end:
	mutex_unlock(&mcast->mce_lock);
	return err;
}

static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 cfg;

	/* Skip PF0 (i.e. AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;
		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];

		/* Is this NIX0/1 block mapped to this PF? */
		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
			continue;

		/* Save the start MCE */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);

		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->bcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}

static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;
	int err, size;

	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_bcast_tables(rvu, nix_hw);
}

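/* Worked example (values follow from the MC_TBL_SIZE/MC_BUF_CNT
 * defaults at the top of this file; the per-entry size is read from
 * hardware): with MC_TBL_SZ_512 the MCE context area holds
 * 256UL << 1 = 512 entries, and with MC_BUF_CNT_128 the replication
 * buffer area holds 8UL << 4 = 128 buffers. Growing either default
 * only changes these two qmem_alloc() sizes and the corresponding
 * CFG register fields.
 */
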
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	int err;

	/* Allocate resource bitmap for tx vtag def registers */
	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
	err = rvu_alloc_bitmap(&vlan->rsrc);
	if (err)
		return -ENOMEM;

	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
					    sizeof(u16), GFP_KERNEL);
	if (!vlan->entry2pfvf_map)
		goto free_mem;

	mutex_init(&vlan->rsrc_lock);
	return 0;

free_mem:
	kfree(vlan->rsrc.bmap);
	return -ENOMEM;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}
	return 0;
}

int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int fmt_idx;

	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
			return fmt_idx;
	}
	if (fmt_idx >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;
	return fmt_idx;
}

static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
				    int blkaddr)
{
	u64 cfgs[] = {
		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
	};
	int i, rc;
	u64 total;

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
					       GFP_KERNEL);
	if (!nix_hw->mark_format.cfg)
		return -ENOMEM;
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				rc, i);
	}

	return 0;
}

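/* Illustrative note: rvu_nix_reserve_mark_format() above dedupes, so a
 * PF/VF requesting one of the cfgs[] values already programmed at init
 * simply gets the existing format index back; only a config with no
 * match consumes one of the NIX_AF_PSE_CONST-reported slots. The exact
 * bit packing of each cfg word (header offset plus yellow/red
 * mask/value pairs) is an assumption from the hardware manual, not
 * spelled out in this file.
 */
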
static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* CN10K supports an LBK FIFO size of 72 KB */
	if (rvu->hw->lbk_bufsize == 0x12000)
		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* RPM supports a FIFO length of 128 KB */
	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
				     struct nix_hw_info *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);

	rsp->min_mtu = NIC_HW_MIN_FRS;
	return 0;
}

int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}

/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}

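/* Illustrative note: RSS flow key algorithms are a small shared pool
 * (NIX_FLOW_KEY_ALG_MAX entries), so identical flowkey_cfg requests
 * from different PFs/VFs resolve to the same algorithm index rather
 * than consuming a new slot; only a config with no existing match
 * reserves a fresh entry via reserve_flowkey_alg_idx() further below.
 */
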
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;
	int l4_key_offset = 0;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG  5
#define MAX_KEY_OFF	40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then NPC MCAM
	 * has to be programmed to filter such pkts and its action should
	 * point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */

	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	for (idx = 0;
	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	     key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
		if (field_marker)
			memset(&tmp, 0, sizeof(tmp));

		field_marker = true;
		keyoff_marker = true;
		switch (key_type) {
		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field->bytesm1 = 1;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 9; /* offset */
			field->bytesm1 = 0; /* 1 byte */
			field->ltype_match = NPC_LT_LC_IP;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4:
		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP;
			}
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
			field->ltype_mask = 0xF; /* Match only IPv4 */
			keyoff_marker = false;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6:
		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP6;
			}
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
			field->ltype_mask = 0xF; /* Match only IPv6 */
			break;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
		case NIX_FLOW_KEY_TYPE_INNR_TCP:
		case NIX_FLOW_KEY_TYPE_INNR_UDP:
		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
			field->lid = NPC_LID_LD;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
				field->lid = NPC_LID_LH;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */

			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
			 * so no need to change the ltype_match, just change
			 * the lid for inner protocols
			 */
			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
				     (int)NPC_LT_LH_TU_TCP);
			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
				     (int)NPC_LT_LH_TU_UDP);
			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
				     (int)NPC_LT_LH_TU_SCTP);

			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
			    valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			}
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group items
				 * is enabled in the group but not the final one
				 */
				if (group_member) {
					valid_key = true;
					group_member = false;
				}
			} else {
				field_marker = false;
				keyoff_marker = false;
			}

			/* TCP/UDP/SCTP and ESP/AH fall at the same offset so
			 * remember the TCP key offset of the 40 byte hash key.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
				l4_key_offset = key_off;
			break;
		case NIX_FLOW_KEY_TYPE_NVGRE:
			field->lid = NPC_LID_LD;
			field->hdr_offset = 4; /* VSID offset */
			field->bytesm1 = 2;
			field->ltype_match = NPC_LT_LD_NVGRE;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VXLAN:
		case NIX_FLOW_KEY_TYPE_GENEVE:
			field->lid = NPC_LID_LE;
			field->bytesm1 = 2;
			field->hdr_offset = 4;
			field->ltype_mask = 0xF;
			field_marker = false;
			keyoff_marker = false;

			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
				field->ltype_match |= NPC_LT_LE_VXLAN;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
				field->ltype_match |= NPC_LT_LE_GENEVE;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
				if (group_member) {
					field->ltype_mask = ~field->ltype_match;
					field_marker = true;
					keyoff_marker = true;
					valid_key = true;
					group_member = false;
				}
			}
			break;
		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
			field->lid = NPC_LID_LA;
			field->ltype_match = NPC_LT_LA_ETHER;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
				field->lid = NPC_LID_LF;
				field->ltype_match = NPC_LT_LF_TU_ETHER;
			}
			field->hdr_offset = 0;
			field->bytesm1 = 5; /* DMAC, 6 bytes */
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 byte ext hdr */
			field->ltype_match = NPC_LT_LC_IP6_EXT;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_GTPU:
			field->lid = NPC_LID_LE;
			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
			field->ltype_match = NPC_LT_LE_GTPU;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VLAN:
			field->lid = NPC_LID_LB;
			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
			field->bytesm1 = 1; /* 2 bytes (actually only 12 bits) */
			field->ltype_match = NPC_LT_LB_CTAG;
			field->ltype_mask = 0xF;
			field->fn_mask = 1; /* Mask out the first nibble */
			break;
		case NIX_FLOW_KEY_TYPE_AH:
		case NIX_FLOW_KEY_TYPE_ESP:
			field->hdr_offset = 0;
			field->bytesm1 = 7; /* SPI + sequence number */
			field->ltype_mask = 0xF;
			field->lid = NPC_LID_LE;
			field->ltype_match = NPC_LT_LE_ESP;
			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
				field->lid = NPC_LID_LD;
				field->ltype_match = NPC_LT_LD_AH;
				field->hdr_offset = 4;
				keyoff_marker = false;
			}
			break;
		}
		field->ena = 1;

		/* Found a valid flow key type */
		if (valid_key) {
			/* Use the key offset of TCP/UDP/SCTP fields
			 * for ESP/AH fields.
			 */
			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
			    key_type == NIX_FLOW_KEY_TYPE_AH)
				key_off = l4_key_offset;
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;
	else
		return NIX_AF_ERR_RSS_NOSPC_FIELD;
}

static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return -EINVAL;

	/* No room to add a new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}

int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get an algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}

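/* Illustrative sketch (hypothetical request, not part of the driver):
 * a PF/VF asking for a classic 4-tuple hash would OR the flow key
 * type bits, e.g.:
 *
 *	struct nix_rss_flowkey_cfg req = { 0 };
 *
 *	req.flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
 *			  NIX_FLOW_KEY_TYPE_IPV6 |
 *			  NIX_FLOW_KEY_TYPE_TCP |
 *			  NIX_FLOW_KEY_TYPE_UDP;
 *
 * The returned alg_idx is what NPC programs into the RX action;
 * non-TCP/UDP IP packets then hash on the IP 2-tuple alone.
 */
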
static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	return 0;
}

int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp)
{
	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* VF can't overwrite admin(PF) changes */
	if (from_vf && pfvf->pf_set_vf_cfg)
		return -EPERM;

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);

	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
				      struct msg_req *req,
				      struct nix_get_mac_addr_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	if (!is_nixlf_attached(rvu, pcifunc))
		return NIX_AF_ERR_AF_LF_INVALID;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	return 0;
}

int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp)
{
	bool allmulti = false, disable_promisc = false;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (req->mode & NIX_RX_MODE_PROMISC)
		allmulti = false;
	else if (req->mode & NIX_RX_MODE_ALLMULTI)
		allmulti = true;
	else
		disable_promisc = true;

	if (disable_promisc)
		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
	else
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, allmulti);
	return 0;
}

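/* Illustrative note: the mode resolution above is priority ordered.
 * A request with both NIX_RX_MODE_PROMISC and NIX_RX_MODE_ALLMULTI
 * set installs the plain promiscuous MCAM entry (allmulti stays
 * false), while a mode of 0 (unicast only) tears the entry down.
 */
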
static void nix_find_link_frs(struct rvu *rvu,
			      struct nix_frs_cfg *req, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct rvu_pfvf *pfvf;
	int maxlen, minlen;
	int numvfs, hwvf;
	int vf;

	/* Update with requester's min/max lengths */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->maxlen = req->maxlen;
	if (req->update_minlen)
		pfvf->minlen = req->minlen;

	maxlen = req->maxlen;
	minlen = req->update_minlen ? req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* For each VF, compare requested max/minlen */
	for (vf = 0; vf < numvfs; vf++) {
		pfvf = &rvu->hwvf[hwvf + vf];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;
	}

	/* Compare requested max/minlen with PF's max/minlen */
	pfvf = &rvu->pf[pf];
	if (pfvf->maxlen > maxlen)
		maxlen = pfvf->maxlen;
	if (req->update_minlen &&
	    pfvf->minlen && pfvf->minlen < minlen)
		minlen = pfvf->minlen;

	/* Update the request with the max/min of the PF and its VFs */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
}

int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	u8 cgx = 0, lmac = 0;
	u16 max_mtu;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	if (is_afvf(pcifunc))
		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
	else
		rvu_get_lmac_link_max_frs(rvu, &max_mtu);

	if (!req->sdp_link && req->maxlen > max_mtu)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if requester wants to update SMQ's */
	if (!req->update_smq)
		goto rx_frscfg;

	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	mutex_lock(&rvu->rsrc_lock);
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
		if (req->update_minlen)
			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
	}
	mutex_unlock(&rvu->rsrc_lock);

rx_frscfg:
	/* Check if config is for SDP link */
	if (req->sdp_link) {
		if (!hw->sdp_links)
			return NIX_AF_ERR_RX_LINK_INVALID;
		link = hw->cgx_links + hw->lbk_links;
		goto linkcfg;
	}

	/* Check if the request is from CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		link = hw->cgx_links;
	}

	if (link < 0)
		return NIX_AF_ERR_RX_LINK_INVALID;

	nix_find_link_frs(rvu, req, pcifunc);

linkcfg:
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	if (req->sdp_link || pf == 0)
		return 0;

	/* Update transmit credits for CGX links */
	lmac_fifo_len =
		rvu_cgx_get_fifolen(rvu) /
		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
	cfg &= ~(0xFFFFFULL << 12);
	cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
	return 0;
}

int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
				    struct msg_rsp *rsp)
{
	int nixlf, blkaddr, err;
	u64 cfg;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
	/* Set the interface configuration */
	if (req->len_verify & BIT(0))
		cfg |= BIT_ULL(41);
	else
		cfg &= ~BIT_ULL(41);

	if (req->len_verify & BIT(1))
		cfg |= BIT_ULL(40);
	else
		cfg &= ~BIT_ULL(40);

	if (req->csum_verify & BIT(0))
		cfg |= BIT_ULL(37);
	else
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);

	return 0;
}

static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	/* CN10K supports a 72 KB LBK FIFO and a max packet size of 64K */
	if (rvu->hw->lbk_bufsize == 0x12000)
		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;

	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}

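/* Worked example (numbers assumed purely for illustration): a 4-LMAC
 * CGX sharing a 64 KB FIFO gives each LMAC 16384 bytes; with a
 * 1518-byte max frame, the per-link credit count programmed by the
 * credit updates above and in nix_link_config() below is
 * (16384 - 1518) / 16 = 929 16-byte units.
 */
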
static void nix_link_config(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;
	u16 lbk_max_frs, lmac_max_frs;
	u64 tx_credits;

	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
	 * as undersize and report them to SW as error pkts, hence
	 * setting it to 40 bytes.
	 */
	for (link = 0; link < hw->cgx_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
	}

	for (link = hw->cgx_links; link < hw->lbk_links; link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
	}
	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	/* Set credits for Tx links assuming max packet length allowed.
	 * This will be reconfigured based on MTU set for PF/VF.
	 */
	for (cgx = 0; cgx < hw->cgx; cgx++) {
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
			       lmac_max_frs) / 16;
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		slink = cgx * hw->lmac_per_cgx;
		for (link = slink; link < (slink + lmac_cnt); link++) {
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
				    tx_credits);
		}
	}

	/* Set Tx credits for LBK link */
	slink = hw->cgx_links;
	for (link = slink; link < (slink + hw->lbk_links); link++) {
		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
	}
}

static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);

		/* Set chan/link to backpressure TL3 instead of TL2 */
		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and are transmitting pkts
		 * at the same time.
		 */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
		cfg &= ~BIT_ULL(15);
		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
	}

	ltdefs = rvu->kpu.lt_def;
	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	if (is_block_implemented(hw, blkaddr)) {
		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_txvlan(rvu, nix_hw);
		if (err)
			return err;

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps HW protocol checker to identify headers
		 * and validate length and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

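/* Illustrative note: rvu_nix_init() above and rvu_nix_freemem() below
 * share the same iteration idiom for multi-block silicon (NIX0/NIX1):
 *
 *	int blkaddr = 0;
 *
 *	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
 *	while (blkaddr) {
 *		// per-block init or teardown
 *		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
 *	}
 *
 * A starting blkaddr of 0 yields the first NIX block, and the walk
 * terminates when the helper returns 0 past the last block.
 */
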
static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);
		devm_kfree(rvu->dev, vlan->entry2pfvf_map);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_disable_flows(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	nix_ctx_free(rvu, pfvf);
}

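/* Illustrative note on the teardown order above: MCAM entries are
 * disabled first so no new packets are steered to the LF, Rx is
 * synced, Tx scheduler queues are freed, and only then are the
 * SQ/RQ/CQ hardware contexts disabled and their memory released.
 * Reversing this order could leave in-flight packets referencing
 * freed contexts.
 */
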
#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf;
	int nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}

void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* Overwrite the VF MAC address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}