1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 14 #include "rvu_struct.h" 15 #include "rvu_reg.h" 16 #include "rvu.h" 17 #include "npc.h" 18 #include "cgx.h" 19 #include "lmac_common.h" 20 21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 23 int type, int chan_id); 24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 25 int type, bool add); 26 static int nix_setup_ipolicers(struct rvu *rvu, 27 struct nix_hw *nix_hw, int blkaddr); 28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw); 29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 30 struct nix_hw *nix_hw, u16 pcifunc); 31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 33 u32 leaf_prof); 34 35 enum mc_tbl_sz { 36 MC_TBL_SZ_256, 37 MC_TBL_SZ_512, 38 MC_TBL_SZ_1K, 39 MC_TBL_SZ_2K, 40 MC_TBL_SZ_4K, 41 MC_TBL_SZ_8K, 42 MC_TBL_SZ_16K, 43 MC_TBL_SZ_32K, 44 MC_TBL_SZ_64K, 45 }; 46 47 enum mc_buf_cnt { 48 MC_BUF_CNT_8, 49 MC_BUF_CNT_16, 50 MC_BUF_CNT_32, 51 MC_BUF_CNT_64, 52 MC_BUF_CNT_128, 53 MC_BUF_CNT_256, 54 MC_BUF_CNT_512, 55 MC_BUF_CNT_1024, 56 MC_BUF_CNT_2048, 57 }; 58 59 enum nix_makr_fmt_indexes { 60 NIX_MARK_CFG_IP_DSCP_RED, 61 NIX_MARK_CFG_IP_DSCP_YELLOW, 62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 63 NIX_MARK_CFG_IP_ECN_RED, 64 NIX_MARK_CFG_IP_ECN_YELLOW, 65 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 66 NIX_MARK_CFG_VLAN_DEI_RED, 67 NIX_MARK_CFG_VLAN_DEI_YELLOW, 68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 69 NIX_MARK_CFG_MAX, 70 }; 71 72 /* For now considering MC resources needed for broadcast 73 * pkt replication only. i.e 256 HWVFs + 12 PFs. 
74 */ 75 #define MC_TBL_SIZE MC_TBL_SZ_512 76 #define MC_BUF_CNT MC_BUF_CNT_128 77 78 struct mce { 79 struct hlist_node node; 80 u16 pcifunc; 81 }; 82 83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 84 { 85 int i = 0; 86 87 /*If blkaddr is 0, return the first nix block address*/ 88 if (blkaddr == 0) 89 return rvu->nix_blkaddr[blkaddr]; 90 91 while (i + 1 < MAX_NIX_BLKS) { 92 if (rvu->nix_blkaddr[i] == blkaddr) 93 return rvu->nix_blkaddr[i + 1]; 94 i++; 95 } 96 97 return 0; 98 } 99 100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 101 { 102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 103 int blkaddr; 104 105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 106 if (!pfvf->nixlf || blkaddr < 0) 107 return false; 108 return true; 109 } 110 111 int rvu_get_nixlf_count(struct rvu *rvu) 112 { 113 int blkaddr = 0, max = 0; 114 struct rvu_block *block; 115 116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 117 while (blkaddr) { 118 block = &rvu->hw->block[blkaddr]; 119 max += block->lf.max; 120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 121 } 122 return max; 123 } 124 125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 126 { 127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 128 struct rvu_hwinfo *hw = rvu->hw; 129 int blkaddr; 130 131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 132 if (!pfvf->nixlf || blkaddr < 0) 133 return NIX_AF_ERR_AF_LF_INVALID; 134 135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 136 if (*nixlf < 0) 137 return NIX_AF_ERR_AF_LF_INVALID; 138 139 if (nix_blkaddr) 140 *nix_blkaddr = blkaddr; 141 142 return 0; 143 } 144 145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 146 struct nix_hw **nix_hw, int *blkaddr) 147 { 148 struct rvu_pfvf *pfvf; 149 150 pfvf = rvu_get_pfvf(rvu, pcifunc); 151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 152 if (!pfvf->nixlf || *blkaddr < 0) 153 return NIX_AF_ERR_AF_LF_INVALID; 154 155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 156 if (!*nix_hw) 157 return NIX_AF_ERR_INVALID_NIXBLK; 158 return 0; 159 } 160 161 static void nix_mce_list_init(struct nix_mce_list *list, int max) 162 { 163 INIT_HLIST_HEAD(&list->head); 164 list->count = 0; 165 list->max = max; 166 } 167 168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 169 { 170 int idx; 171 172 if (!mcast) 173 return 0; 174 175 idx = mcast->next_free_mce; 176 mcast->next_free_mce += count; 177 return idx; 178 } 179 180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 181 { 182 int nix_blkaddr = 0, i = 0; 183 struct rvu *rvu = hw->rvu; 184 185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 186 while (nix_blkaddr) { 187 if (blkaddr == nix_blkaddr && hw->nix) 188 return &hw->nix[i]; 189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 190 i++; 191 } 192 return NULL; 193 } 194 195 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 196 { 197 int err; 198 199 /*Sync all in flight RX packets to LLC/DRAM */ 200 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 201 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 202 if (err) 203 dev_err(rvu->dev, "NIX RX software sync failed\n"); 204 } 205 206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 207 int lvl, u16 pcifunc, u16 schq) 208 { 209 struct rvu_hwinfo *hw = rvu->hw; 210 struct nix_txsch *txsch; 211 struct nix_hw *nix_hw; 212 u16 map_func; 213 214 nix_hw = get_nix_hw(rvu->hw, blkaddr); 215 if (!nix_hw) 216 return false; 217 218 txsch = 
&nix_hw->txsch[lvl];

	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct mac_ops *mac_ops;
	int pkind, pf, vf, lbkid;
	u8 cgx_id, lmac_id;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
								    rvu),
						      lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternately for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
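	 *
	 * i.e. for a CGX channel (per the assignment below):
	 * bpid = (cgx_id * lmac_per_cgx + lmac_id) * lmac_chan_cnt + chan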
409 */ 410 switch (type) { 411 case NIX_INTF_TYPE_CGX: 412 if ((req->chan_base + req->chan_cnt) > 15) 413 return -EINVAL; 414 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 415 /* Assign bpid based on cgx, lmac and chan id */ 416 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 417 (lmac_id * lmac_chan_cnt) + req->chan_base; 418 419 if (req->bpid_per_chan) 420 bpid += chan_id; 421 if (bpid > cgx_bpid_cnt) 422 return -EINVAL; 423 break; 424 425 case NIX_INTF_TYPE_LBK: 426 if ((req->chan_base + req->chan_cnt) > 63) 427 return -EINVAL; 428 bpid = cgx_bpid_cnt + req->chan_base; 429 if (req->bpid_per_chan) 430 bpid += chan_id; 431 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 432 return -EINVAL; 433 break; 434 default: 435 return -EINVAL; 436 } 437 return bpid; 438 } 439 440 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 441 struct nix_bp_cfg_req *req, 442 struct nix_bp_cfg_rsp *rsp) 443 { 444 int blkaddr, pf, type, chan_id = 0; 445 u16 pcifunc = req->hdr.pcifunc; 446 struct rvu_pfvf *pfvf; 447 u16 chan_base, chan; 448 s16 bpid, bpid_base; 449 u64 cfg; 450 451 pf = rvu_get_pf(pcifunc); 452 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 453 454 /* Enable backpressure only for CGX mapped PFs and LBK interface */ 455 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 456 return 0; 457 458 pfvf = rvu_get_pfvf(rvu, pcifunc); 459 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 460 461 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 462 chan_base = pfvf->rx_chan_base + req->chan_base; 463 bpid = bpid_base; 464 465 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 466 if (bpid < 0) { 467 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 468 return -EINVAL; 469 } 470 471 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 472 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 473 cfg | (bpid & 0xFF) | BIT_ULL(16)); 474 chan_id++; 475 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 476 } 477 478 for (chan = 0; chan < req->chan_cnt; chan++) { 479 /* Map channel and bpid assign to it */ 480 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 481 (bpid_base & 0x3FF); 482 if (req->bpid_per_chan) 483 bpid_base++; 484 } 485 rsp->chan_cnt = req->chan_cnt; 486 487 return 0; 488 } 489 490 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 491 u64 format, bool v4, u64 *fidx) 492 { 493 struct nix_lso_format field = {0}; 494 495 /* IP's Length field */ 496 field.layer = NIX_TXLAYER_OL3; 497 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 498 field.offset = v4 ? 
2 : 4; 499 field.sizem1 = 1; /* i.e 2 bytes */ 500 field.alg = NIX_LSOALG_ADD_PAYLEN; 501 rvu_write64(rvu, blkaddr, 502 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 503 *(u64 *)&field); 504 505 /* No ID field in IPv6 header */ 506 if (!v4) 507 return; 508 509 /* IP's ID field */ 510 field.layer = NIX_TXLAYER_OL3; 511 field.offset = 4; 512 field.sizem1 = 1; /* i.e 2 bytes */ 513 field.alg = NIX_LSOALG_ADD_SEGNUM; 514 rvu_write64(rvu, blkaddr, 515 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 516 *(u64 *)&field); 517 } 518 519 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 520 u64 format, u64 *fidx) 521 { 522 struct nix_lso_format field = {0}; 523 524 /* TCP's sequence number field */ 525 field.layer = NIX_TXLAYER_OL4; 526 field.offset = 4; 527 field.sizem1 = 3; /* i.e 4 bytes */ 528 field.alg = NIX_LSOALG_ADD_OFFSET; 529 rvu_write64(rvu, blkaddr, 530 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 531 *(u64 *)&field); 532 533 /* TCP's flags field */ 534 field.layer = NIX_TXLAYER_OL4; 535 field.offset = 12; 536 field.sizem1 = 1; /* 2 bytes */ 537 field.alg = NIX_LSOALG_TCP_FLAGS; 538 rvu_write64(rvu, blkaddr, 539 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 540 *(u64 *)&field); 541 } 542 543 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 544 { 545 u64 cfg, idx, fidx = 0; 546 547 /* Get max HW supported format indices */ 548 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 549 nix_hw->lso.total = cfg; 550 551 /* Enable LSO */ 552 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 553 /* For TSO, set first and middle segment flags to 554 * mask out PSH, RST & FIN flags in TCP packet 555 */ 556 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 557 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 558 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 559 560 /* Setup default static LSO formats 561 * 562 * Configure format fields for TCPv4 segmentation offload 563 */ 564 idx = NIX_LSO_FORMAT_IDX_TSOV4; 565 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 566 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 567 568 /* Set rest of the fields to NOP */ 569 for (; fidx < 8; fidx++) { 570 rvu_write64(rvu, blkaddr, 571 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 572 } 573 nix_hw->lso.in_use++; 574 575 /* Configure format fields for TCPv6 segmentation offload */ 576 idx = NIX_LSO_FORMAT_IDX_TSOV6; 577 fidx = 0; 578 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 579 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 580 581 /* Set rest of the fields to NOP */ 582 for (; fidx < 8; fidx++) { 583 rvu_write64(rvu, blkaddr, 584 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 585 } 586 nix_hw->lso.in_use++; 587 } 588 589 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 590 { 591 kfree(pfvf->rq_bmap); 592 kfree(pfvf->sq_bmap); 593 kfree(pfvf->cq_bmap); 594 if (pfvf->rq_ctx) 595 qmem_free(rvu->dev, pfvf->rq_ctx); 596 if (pfvf->sq_ctx) 597 qmem_free(rvu->dev, pfvf->sq_ctx); 598 if (pfvf->cq_ctx) 599 qmem_free(rvu->dev, pfvf->cq_ctx); 600 if (pfvf->rss_ctx) 601 qmem_free(rvu->dev, pfvf->rss_ctx); 602 if (pfvf->nix_qints_ctx) 603 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 604 if (pfvf->cq_ints_ctx) 605 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 606 607 pfvf->rq_bmap = NULL; 608 pfvf->cq_bmap = NULL; 609 pfvf->sq_bmap = NULL; 610 pfvf->rq_ctx = NULL; 611 pfvf->sq_ctx = NULL; 612 pfvf->cq_ctx = NULL; 613 pfvf->rss_ctx = NULL; 614 pfvf->nix_qints_ctx = NULL; 615 pfvf->cq_ints_ctx = NULL; 616 } 617 618 static int 
nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 619 struct rvu_pfvf *pfvf, int nixlf, 620 int rss_sz, int rss_grps, int hwctx_size, 621 u64 way_mask) 622 { 623 int err, grp, num_indices; 624 625 /* RSS is not requested for this NIXLF */ 626 if (!rss_sz) 627 return 0; 628 num_indices = rss_sz * rss_grps; 629 630 /* Alloc NIX RSS HW context memory and config the base */ 631 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 632 if (err) 633 return err; 634 635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 636 (u64)pfvf->rss_ctx->iova); 637 638 /* Config full RSS table size, enable RSS and caching */ 639 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), 640 BIT_ULL(36) | BIT_ULL(4) | 641 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | 642 way_mask << 20); 643 /* Config RSS group offset and sizes */ 644 for (grp = 0; grp < rss_grps; grp++) 645 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 646 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 647 return 0; 648 } 649 650 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 651 struct nix_aq_inst_s *inst) 652 { 653 struct admin_queue *aq = block->aq; 654 struct nix_aq_res_s *result; 655 int timeout = 1000; 656 u64 reg, head; 657 658 result = (struct nix_aq_res_s *)aq->res->base; 659 660 /* Get current head pointer where to append this instruction */ 661 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 662 head = (reg >> 4) & AQ_PTR_MASK; 663 664 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 665 (void *)inst, aq->inst->entry_sz); 666 memset(result, 0, sizeof(*result)); 667 /* sync into memory */ 668 wmb(); 669 670 /* Ring the doorbell and wait for result */ 671 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 672 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 673 cpu_relax(); 674 udelay(1); 675 timeout--; 676 if (!timeout) 677 return -EBUSY; 678 } 679 680 if (result->compcode != NIX_AQ_COMP_GOOD) 681 /* TODO: Replace this with some error code */ 682 return -EBUSY; 683 684 return 0; 685 } 686 687 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 688 struct nix_aq_enq_req *req, 689 struct nix_aq_enq_rsp *rsp) 690 { 691 struct rvu_hwinfo *hw = rvu->hw; 692 u16 pcifunc = req->hdr.pcifunc; 693 int nixlf, blkaddr, rc = 0; 694 struct nix_aq_inst_s inst; 695 struct rvu_block *block; 696 struct admin_queue *aq; 697 struct rvu_pfvf *pfvf; 698 void *ctx, *mask; 699 bool ena; 700 u64 cfg; 701 702 blkaddr = nix_hw->blkaddr; 703 block = &hw->block[blkaddr]; 704 aq = block->aq; 705 if (!aq) { 706 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 707 return NIX_AF_ERR_AQ_ENQUEUE; 708 } 709 710 pfvf = rvu_get_pfvf(rvu, pcifunc); 711 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 712 713 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 714 * operations done by AF itself. 
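	 * (MCE updates arrive with rsp == NULL and AF band profile
	 * operations with pcifunc == 0, so there is no NIXLF to validate.)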
715 */ 716 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 717 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 718 if (!pfvf->nixlf || nixlf < 0) 719 return NIX_AF_ERR_AF_LF_INVALID; 720 } 721 722 switch (req->ctype) { 723 case NIX_AQ_CTYPE_RQ: 724 /* Check if index exceeds max no of queues */ 725 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 726 rc = NIX_AF_ERR_AQ_ENQUEUE; 727 break; 728 case NIX_AQ_CTYPE_SQ: 729 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 730 rc = NIX_AF_ERR_AQ_ENQUEUE; 731 break; 732 case NIX_AQ_CTYPE_CQ: 733 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 734 rc = NIX_AF_ERR_AQ_ENQUEUE; 735 break; 736 case NIX_AQ_CTYPE_RSS: 737 /* Check if RSS is enabled and qidx is within range */ 738 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 739 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 740 (req->qidx >= (256UL << (cfg & 0xF)))) 741 rc = NIX_AF_ERR_AQ_ENQUEUE; 742 break; 743 case NIX_AQ_CTYPE_MCE: 744 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 745 746 /* Check if index exceeds MCE list length */ 747 if (!nix_hw->mcast.mce_ctx || 748 (req->qidx >= (256UL << (cfg & 0xF)))) 749 rc = NIX_AF_ERR_AQ_ENQUEUE; 750 751 /* Adding multicast lists for requests from PF/VFs is not 752 * yet supported, so ignore this. 753 */ 754 if (rsp) 755 rc = NIX_AF_ERR_AQ_ENQUEUE; 756 break; 757 case NIX_AQ_CTYPE_BANDPROF: 758 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 759 nix_hw, pcifunc)) 760 rc = NIX_AF_ERR_INVALID_BANDPROF; 761 break; 762 default: 763 rc = NIX_AF_ERR_AQ_ENQUEUE; 764 } 765 766 if (rc) 767 return rc; 768 769 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 770 if (req->ctype == NIX_AQ_CTYPE_SQ && 771 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 772 (req->op == NIX_AQ_INSTOP_WRITE && 773 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 774 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 775 pcifunc, req->sq.smq)) 776 return NIX_AF_ERR_AQ_ENQUEUE; 777 } 778 779 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 780 inst.lf = nixlf; 781 inst.cindex = req->qidx; 782 inst.ctype = req->ctype; 783 inst.op = req->op; 784 /* Currently we are not supporting enqueuing multiple instructions, 785 * so always choose first entry in result memory. 786 */ 787 inst.res_addr = (u64)aq->res->iova; 788 789 /* Hardware uses same aq->res->base for updating result of 790 * previous instruction hence wait here till it is done. 
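	 * aq->lock taken below serializes instruction submission and
	 * result readout across callers sharing this result area.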
791 */ 792 spin_lock(&aq->lock); 793 794 /* Clean result + context memory */ 795 memset(aq->res->base, 0, aq->res->entry_sz); 796 /* Context needs to be written at RES_ADDR + 128 */ 797 ctx = aq->res->base + 128; 798 /* Mask needs to be written at RES_ADDR + 256 */ 799 mask = aq->res->base + 256; 800 801 switch (req->op) { 802 case NIX_AQ_INSTOP_WRITE: 803 if (req->ctype == NIX_AQ_CTYPE_RQ) 804 memcpy(mask, &req->rq_mask, 805 sizeof(struct nix_rq_ctx_s)); 806 else if (req->ctype == NIX_AQ_CTYPE_SQ) 807 memcpy(mask, &req->sq_mask, 808 sizeof(struct nix_sq_ctx_s)); 809 else if (req->ctype == NIX_AQ_CTYPE_CQ) 810 memcpy(mask, &req->cq_mask, 811 sizeof(struct nix_cq_ctx_s)); 812 else if (req->ctype == NIX_AQ_CTYPE_RSS) 813 memcpy(mask, &req->rss_mask, 814 sizeof(struct nix_rsse_s)); 815 else if (req->ctype == NIX_AQ_CTYPE_MCE) 816 memcpy(mask, &req->mce_mask, 817 sizeof(struct nix_rx_mce_s)); 818 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 819 memcpy(mask, &req->prof_mask, 820 sizeof(struct nix_bandprof_s)); 821 fallthrough; 822 case NIX_AQ_INSTOP_INIT: 823 if (req->ctype == NIX_AQ_CTYPE_RQ) 824 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 825 else if (req->ctype == NIX_AQ_CTYPE_SQ) 826 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 827 else if (req->ctype == NIX_AQ_CTYPE_CQ) 828 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 829 else if (req->ctype == NIX_AQ_CTYPE_RSS) 830 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 831 else if (req->ctype == NIX_AQ_CTYPE_MCE) 832 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 833 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 834 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 835 break; 836 case NIX_AQ_INSTOP_NOP: 837 case NIX_AQ_INSTOP_READ: 838 case NIX_AQ_INSTOP_LOCK: 839 case NIX_AQ_INSTOP_UNLOCK: 840 break; 841 default: 842 rc = NIX_AF_ERR_AQ_ENQUEUE; 843 spin_unlock(&aq->lock); 844 return rc; 845 } 846 847 /* Submit the instruction to AQ */ 848 rc = nix_aq_enqueue_wait(rvu, block, &inst); 849 if (rc) { 850 spin_unlock(&aq->lock); 851 return rc; 852 } 853 854 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 855 if (req->op == NIX_AQ_INSTOP_INIT) { 856 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 857 __set_bit(req->qidx, pfvf->rq_bmap); 858 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 859 __set_bit(req->qidx, pfvf->sq_bmap); 860 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 861 __set_bit(req->qidx, pfvf->cq_bmap); 862 } 863 864 if (req->op == NIX_AQ_INSTOP_WRITE) { 865 if (req->ctype == NIX_AQ_CTYPE_RQ) { 866 ena = (req->rq.ena & req->rq_mask.ena) | 867 (test_bit(req->qidx, pfvf->rq_bmap) & 868 ~req->rq_mask.ena); 869 if (ena) 870 __set_bit(req->qidx, pfvf->rq_bmap); 871 else 872 __clear_bit(req->qidx, pfvf->rq_bmap); 873 } 874 if (req->ctype == NIX_AQ_CTYPE_SQ) { 875 ena = (req->rq.ena & req->sq_mask.ena) | 876 (test_bit(req->qidx, pfvf->sq_bmap) & 877 ~req->sq_mask.ena); 878 if (ena) 879 __set_bit(req->qidx, pfvf->sq_bmap); 880 else 881 __clear_bit(req->qidx, pfvf->sq_bmap); 882 } 883 if (req->ctype == NIX_AQ_CTYPE_CQ) { 884 ena = (req->rq.ena & req->cq_mask.ena) | 885 (test_bit(req->qidx, pfvf->cq_bmap) & 886 ~req->cq_mask.ena); 887 if (ena) 888 __set_bit(req->qidx, pfvf->cq_bmap); 889 else 890 __clear_bit(req->qidx, pfvf->cq_bmap); 891 } 892 } 893 894 if (rsp) { 895 /* Copy read context into mailbox */ 896 if (req->op == NIX_AQ_INSTOP_READ) { 897 if (req->ctype == NIX_AQ_CTYPE_RQ) 898 memcpy(&rsp->rq, ctx, 899 sizeof(struct nix_rq_ctx_s)); 900 else if (req->ctype == 
NIX_AQ_CTYPE_SQ) 901 memcpy(&rsp->sq, ctx, 902 sizeof(struct nix_sq_ctx_s)); 903 else if (req->ctype == NIX_AQ_CTYPE_CQ) 904 memcpy(&rsp->cq, ctx, 905 sizeof(struct nix_cq_ctx_s)); 906 else if (req->ctype == NIX_AQ_CTYPE_RSS) 907 memcpy(&rsp->rss, ctx, 908 sizeof(struct nix_rsse_s)); 909 else if (req->ctype == NIX_AQ_CTYPE_MCE) 910 memcpy(&rsp->mce, ctx, 911 sizeof(struct nix_rx_mce_s)); 912 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 913 memcpy(&rsp->prof, ctx, 914 sizeof(struct nix_bandprof_s)); 915 } 916 } 917 918 spin_unlock(&aq->lock); 919 return 0; 920 } 921 922 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 923 struct nix_aq_enq_rsp *rsp) 924 { 925 struct nix_hw *nix_hw; 926 int blkaddr; 927 928 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 929 if (blkaddr < 0) 930 return NIX_AF_ERR_AF_LF_INVALID; 931 932 nix_hw = get_nix_hw(rvu->hw, blkaddr); 933 if (!nix_hw) 934 return -EINVAL; 935 936 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 937 } 938 939 static const char *nix_get_ctx_name(int ctype) 940 { 941 switch (ctype) { 942 case NIX_AQ_CTYPE_CQ: 943 return "CQ"; 944 case NIX_AQ_CTYPE_SQ: 945 return "SQ"; 946 case NIX_AQ_CTYPE_RQ: 947 return "RQ"; 948 case NIX_AQ_CTYPE_RSS: 949 return "RSS"; 950 } 951 return ""; 952 } 953 954 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 955 { 956 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 957 struct nix_aq_enq_req aq_req; 958 unsigned long *bmap; 959 int qidx, q_cnt = 0; 960 int err = 0, rc; 961 962 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 963 return NIX_AF_ERR_AQ_ENQUEUE; 964 965 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 966 aq_req.hdr.pcifunc = req->hdr.pcifunc; 967 968 if (req->ctype == NIX_AQ_CTYPE_CQ) { 969 aq_req.cq.ena = 0; 970 aq_req.cq_mask.ena = 1; 971 aq_req.cq.bp_ena = 0; 972 aq_req.cq_mask.bp_ena = 1; 973 q_cnt = pfvf->cq_ctx->qsize; 974 bmap = pfvf->cq_bmap; 975 } 976 if (req->ctype == NIX_AQ_CTYPE_SQ) { 977 aq_req.sq.ena = 0; 978 aq_req.sq_mask.ena = 1; 979 q_cnt = pfvf->sq_ctx->qsize; 980 bmap = pfvf->sq_bmap; 981 } 982 if (req->ctype == NIX_AQ_CTYPE_RQ) { 983 aq_req.rq.ena = 0; 984 aq_req.rq_mask.ena = 1; 985 q_cnt = pfvf->rq_ctx->qsize; 986 bmap = pfvf->rq_bmap; 987 } 988 989 aq_req.ctype = req->ctype; 990 aq_req.op = NIX_AQ_INSTOP_WRITE; 991 992 for (qidx = 0; qidx < q_cnt; qidx++) { 993 if (!test_bit(qidx, bmap)) 994 continue; 995 aq_req.qidx = qidx; 996 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 997 if (rc) { 998 err = rc; 999 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1000 nix_get_ctx_name(req->ctype), qidx); 1001 } 1002 } 1003 1004 return err; 1005 } 1006 1007 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1008 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1009 { 1010 struct nix_aq_enq_req lock_ctx_req; 1011 int err; 1012 1013 if (req->op != NIX_AQ_INSTOP_INIT) 1014 return 0; 1015 1016 if (req->ctype == NIX_AQ_CTYPE_MCE || 1017 req->ctype == NIX_AQ_CTYPE_DYNO) 1018 return 0; 1019 1020 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1021 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1022 lock_ctx_req.ctype = req->ctype; 1023 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1024 lock_ctx_req.qidx = req->qidx; 1025 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1026 if (err) 1027 dev_err(rvu->dev, 1028 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1029 req->hdr.pcifunc, 1030 nix_get_ctx_name(req->ctype), req->qidx); 1031 return err; 1032 } 1033 1034 int 
rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1035 struct nix_aq_enq_req *req, 1036 struct nix_aq_enq_rsp *rsp) 1037 { 1038 int err; 1039 1040 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1041 if (!err) 1042 err = nix_lf_hwctx_lockdown(rvu, req); 1043 return err; 1044 } 1045 #else 1046 1047 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1048 struct nix_aq_enq_req *req, 1049 struct nix_aq_enq_rsp *rsp) 1050 { 1051 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1052 } 1053 #endif 1054 /* CN10K mbox handler */ 1055 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1056 struct nix_cn10k_aq_enq_req *req, 1057 struct nix_cn10k_aq_enq_rsp *rsp) 1058 { 1059 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1060 (struct nix_aq_enq_rsp *)rsp); 1061 } 1062 1063 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1064 struct hwctx_disable_req *req, 1065 struct msg_rsp *rsp) 1066 { 1067 return nix_lf_hwctx_disable(rvu, req); 1068 } 1069 1070 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1071 struct nix_lf_alloc_req *req, 1072 struct nix_lf_alloc_rsp *rsp) 1073 { 1074 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1075 struct rvu_hwinfo *hw = rvu->hw; 1076 u16 pcifunc = req->hdr.pcifunc; 1077 struct rvu_block *block; 1078 struct rvu_pfvf *pfvf; 1079 u64 cfg, ctx_cfg; 1080 int blkaddr; 1081 1082 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1083 return NIX_AF_ERR_PARAM; 1084 1085 if (req->way_mask) 1086 req->way_mask &= 0xFFFF; 1087 1088 pfvf = rvu_get_pfvf(rvu, pcifunc); 1089 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1090 if (!pfvf->nixlf || blkaddr < 0) 1091 return NIX_AF_ERR_AF_LF_INVALID; 1092 1093 block = &hw->block[blkaddr]; 1094 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1095 if (nixlf < 0) 1096 return NIX_AF_ERR_AF_LF_INVALID; 1097 1098 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1099 if (req->npa_func) { 1100 /* If default, use 'this' NIXLF's PFFUNC */ 1101 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1102 req->npa_func = pcifunc; 1103 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1104 return NIX_AF_INVAL_NPA_PF_FUNC; 1105 } 1106 1107 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1108 if (req->sso_func) { 1109 /* If default, use 'this' NIXLF's PFFUNC */ 1110 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1111 req->sso_func = pcifunc; 1112 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1113 return NIX_AF_INVAL_SSO_PF_FUNC; 1114 } 1115 1116 /* If RSS is being enabled, check if requested config is valid. 1117 * RSS table size should be power of two, otherwise 1118 * RSS_GRP::OFFSET + adder might go beyond that group or 1119 * won't be able to use entire table. 
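	 * e.g. a valid rss_sz of 256 with 4 groups places the group
	 * offsets at 0, 256, 512 and 768 (see nixlf_rss_ctx_init()).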
1120 */ 1121 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1122 !is_power_of_2(req->rss_sz))) 1123 return NIX_AF_ERR_RSS_SIZE_INVALID; 1124 1125 if (req->rss_sz && 1126 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1127 return NIX_AF_ERR_RSS_GRPS_INVALID; 1128 1129 /* Reset this NIX LF */ 1130 err = rvu_lf_reset(rvu, block, nixlf); 1131 if (err) { 1132 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1133 block->addr - BLKADDR_NIX0, nixlf); 1134 return NIX_AF_ERR_LF_RESET; 1135 } 1136 1137 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1138 1139 /* Alloc NIX RQ HW context memory and config the base */ 1140 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1141 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1142 if (err) 1143 goto free_mem; 1144 1145 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1146 if (!pfvf->rq_bmap) 1147 goto free_mem; 1148 1149 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1150 (u64)pfvf->rq_ctx->iova); 1151 1152 /* Set caching and queue count in HW */ 1153 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1154 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1155 1156 /* Alloc NIX SQ HW context memory and config the base */ 1157 hwctx_size = 1UL << (ctx_cfg & 0xF); 1158 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1159 if (err) 1160 goto free_mem; 1161 1162 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1163 if (!pfvf->sq_bmap) 1164 goto free_mem; 1165 1166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1167 (u64)pfvf->sq_ctx->iova); 1168 1169 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1171 1172 /* Alloc NIX CQ HW context memory and config the base */ 1173 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1174 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1175 if (err) 1176 goto free_mem; 1177 1178 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1179 if (!pfvf->cq_bmap) 1180 goto free_mem; 1181 1182 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1183 (u64)pfvf->cq_ctx->iova); 1184 1185 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1186 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1187 1188 /* Initialize receive side scaling (RSS) */ 1189 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1190 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1191 req->rss_grps, hwctx_size, req->way_mask); 1192 if (err) 1193 goto free_mem; 1194 1195 /* Alloc memory for CQINT's HW contexts */ 1196 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1197 qints = (cfg >> 24) & 0xFFF; 1198 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1199 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1200 if (err) 1201 goto free_mem; 1202 1203 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1204 (u64)pfvf->cq_ints_ctx->iova); 1205 1206 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1207 BIT_ULL(36) | req->way_mask << 20); 1208 1209 /* Alloc memory for QINT's HW contexts */ 1210 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1211 qints = (cfg >> 12) & 0xFFF; 1212 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1213 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1214 if (err) 1215 goto free_mem; 1216 1217 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1218 (u64)pfvf->nix_qints_ctx->iova); 1219 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1220 
BIT_ULL(36) | req->way_mask << 20); 1221 1222 /* Setup VLANX TPID's. 1223 * Use VLAN1 for 802.1Q 1224 * and VLAN0 for 802.1AD. 1225 */ 1226 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1227 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1228 1229 /* Enable LMTST for this NIX LF */ 1230 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1231 1232 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1233 if (req->npa_func) 1234 cfg = req->npa_func; 1235 if (req->sso_func) 1236 cfg |= (u64)req->sso_func << 16; 1237 1238 cfg |= (u64)req->xqe_sz << 33; 1239 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1240 1241 /* Config Rx pkt length, csum checks and apad enable / disable */ 1242 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1243 1244 /* Configure pkind for TX parse config */ 1245 cfg = NPC_TX_DEF_PKIND; 1246 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1247 1248 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1249 err = nix_interface_init(rvu, pcifunc, intf, nixlf); 1250 if (err) 1251 goto free_mem; 1252 1253 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1254 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1255 1256 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1257 rvu_write64(rvu, blkaddr, 1258 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1259 VTAGSIZE_T4 | VTAG_STRIP); 1260 1261 goto exit; 1262 1263 free_mem: 1264 nix_ctx_free(rvu, pfvf); 1265 rc = -ENOMEM; 1266 1267 exit: 1268 /* Set macaddr of this PF/VF */ 1269 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1270 1271 /* set SQB size info */ 1272 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1273 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1274 rsp->rx_chan_base = pfvf->rx_chan_base; 1275 rsp->tx_chan_base = pfvf->tx_chan_base; 1276 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1277 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1278 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1279 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1280 /* Get HW supported stat count */ 1281 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1282 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1283 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1284 /* Get count of CQ IRQs and error IRQs supported per LF */ 1285 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1286 rsp->qints = ((cfg >> 12) & 0xFFF); 1287 rsp->cints = ((cfg >> 24) & 0xFFF); 1288 rsp->cgx_links = hw->cgx_links; 1289 rsp->lbk_links = hw->lbk_links; 1290 rsp->sdp_links = hw->sdp_links; 1291 1292 return rc; 1293 } 1294 1295 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1296 struct msg_rsp *rsp) 1297 { 1298 struct rvu_hwinfo *hw = rvu->hw; 1299 u16 pcifunc = req->hdr.pcifunc; 1300 struct rvu_block *block; 1301 int blkaddr, nixlf, err; 1302 struct rvu_pfvf *pfvf; 1303 1304 pfvf = rvu_get_pfvf(rvu, pcifunc); 1305 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1306 if (!pfvf->nixlf || blkaddr < 0) 1307 return NIX_AF_ERR_AF_LF_INVALID; 1308 1309 block = &hw->block[blkaddr]; 1310 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1311 if (nixlf < 0) 1312 return NIX_AF_ERR_AF_LF_INVALID; 1313 1314 if (req->flags & NIX_LF_DISABLE_FLOWS) 1315 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1316 else 1317 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1318 1319 /* Free any tx vtag def entries used by this NIX LF */ 1320 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1321 nix_free_tx_vtag_entries(rvu, pcifunc); 1322 1323 nix_interface_deinit(rvu, pcifunc, 
nixlf); 1324 1325 /* Reset this NIX LF */ 1326 err = rvu_lf_reset(rvu, block, nixlf); 1327 if (err) { 1328 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1329 block->addr - BLKADDR_NIX0, nixlf); 1330 return NIX_AF_ERR_LF_RESET; 1331 } 1332 1333 nix_ctx_free(rvu, pfvf); 1334 1335 return 0; 1336 } 1337 1338 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1339 struct nix_mark_format_cfg *req, 1340 struct nix_mark_format_cfg_rsp *rsp) 1341 { 1342 u16 pcifunc = req->hdr.pcifunc; 1343 struct nix_hw *nix_hw; 1344 struct rvu_pfvf *pfvf; 1345 int blkaddr, rc; 1346 u32 cfg; 1347 1348 pfvf = rvu_get_pfvf(rvu, pcifunc); 1349 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1350 if (!pfvf->nixlf || blkaddr < 0) 1351 return NIX_AF_ERR_AF_LF_INVALID; 1352 1353 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1354 if (!nix_hw) 1355 return -EINVAL; 1356 1357 cfg = (((u32)req->offset & 0x7) << 16) | 1358 (((u32)req->y_mask & 0xF) << 12) | 1359 (((u32)req->y_val & 0xF) << 8) | 1360 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1361 1362 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1363 if (rc < 0) { 1364 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1365 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1366 return NIX_AF_ERR_MARK_CFG_FAIL; 1367 } 1368 1369 rsp->mark_format_idx = rc; 1370 return 0; 1371 } 1372 1373 /* Disable shaping of pkts by a scheduler queue 1374 * at a given scheduler level. 1375 */ 1376 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1377 int lvl, int schq) 1378 { 1379 u64 cir_reg = 0, pir_reg = 0; 1380 u64 cfg; 1381 1382 switch (lvl) { 1383 case NIX_TXSCH_LVL_TL1: 1384 cir_reg = NIX_AF_TL1X_CIR(schq); 1385 pir_reg = 0; /* PIR not available at TL1 */ 1386 break; 1387 case NIX_TXSCH_LVL_TL2: 1388 cir_reg = NIX_AF_TL2X_CIR(schq); 1389 pir_reg = NIX_AF_TL2X_PIR(schq); 1390 break; 1391 case NIX_TXSCH_LVL_TL3: 1392 cir_reg = NIX_AF_TL3X_CIR(schq); 1393 pir_reg = NIX_AF_TL3X_PIR(schq); 1394 break; 1395 case NIX_TXSCH_LVL_TL4: 1396 cir_reg = NIX_AF_TL4X_CIR(schq); 1397 pir_reg = NIX_AF_TL4X_PIR(schq); 1398 break; 1399 } 1400 1401 if (!cir_reg) 1402 return; 1403 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1404 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1405 1406 if (!pir_reg) 1407 return; 1408 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1409 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1410 } 1411 1412 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1413 int lvl, int schq) 1414 { 1415 struct rvu_hwinfo *hw = rvu->hw; 1416 int link; 1417 1418 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1419 return; 1420 1421 /* Reset TL4's SDP link config */ 1422 if (lvl == NIX_TXSCH_LVL_TL4) 1423 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1424 1425 if (lvl != NIX_TXSCH_LVL_TL2) 1426 return; 1427 1428 /* Reset TL2's CGX or LBK link config */ 1429 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1430 rvu_write64(rvu, blkaddr, 1431 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1432 } 1433 1434 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1435 { 1436 struct rvu_hwinfo *hw = rvu->hw; 1437 int pf = rvu_get_pf(pcifunc); 1438 u8 cgx_id = 0, lmac_id = 0; 1439 1440 if (is_afvf(pcifunc)) {/* LBK links */ 1441 return hw->cgx_links; 1442 } else if (is_pf_cgxmapped(rvu, pf)) { 1443 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1444 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1445 } 1446 1447 /* SDP link */ 1448 return hw->cgx_links + hw->lbk_links; 1449 } 1450 1451 
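/* Compute the [start, end) range of transmit scheduler queue indices
 * that a PF_FUNC may use on the given transmit link when the silicon
 * has a fixed TXSCHQ mapping: CGX and LBK links get windows starting
 * at nix_txsch_per_cgx_lmac * link, sized per that link type's lmac
 * capability, while the SDP link uses the window following all CGX
 * and LBK windows.
 */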
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
1629 */ 1630 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1631 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1632 if (rc) 1633 goto err; 1634 } 1635 1636 /* Allocate requested Tx scheduler queues */ 1637 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1638 txsch = &nix_hw->txsch[lvl]; 1639 pfvf_map = txsch->pfvf_map; 1640 1641 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1642 continue; 1643 1644 rsp->schq[lvl] = req->schq[lvl]; 1645 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1646 1647 link = nix_get_tx_link(rvu, pcifunc); 1648 1649 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1650 start = link; 1651 end = link; 1652 } else if (hw->cap.nix_fixed_txschq_mapping) { 1653 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1654 } else { 1655 start = 0; 1656 end = txsch->schq.max; 1657 } 1658 1659 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1660 1661 /* Reset queue config */ 1662 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1663 schq = rsp->schq_contig_list[lvl][idx]; 1664 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1665 NIX_TXSCHQ_CFG_DONE)) 1666 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1667 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1668 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1669 } 1670 1671 for (idx = 0; idx < req->schq[lvl]; idx++) { 1672 schq = rsp->schq_list[lvl][idx]; 1673 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1674 NIX_TXSCHQ_CFG_DONE)) 1675 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1676 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1677 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1678 } 1679 } 1680 1681 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1682 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1683 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1684 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1685 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1686 goto exit; 1687 err: 1688 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1689 exit: 1690 mutex_unlock(&rvu->rsrc_lock); 1691 return rc; 1692 } 1693 1694 static void nix_smq_flush(struct rvu *rvu, int blkaddr, 1695 int smq, u16 pcifunc, int nixlf) 1696 { 1697 int pf = rvu_get_pf(pcifunc); 1698 u8 cgx_id = 0, lmac_id = 0; 1699 int err, restore_tx_en = 0; 1700 u64 cfg; 1701 1702 /* enable cgx tx if disabled */ 1703 if (is_pf_cgxmapped(rvu, pf)) { 1704 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1705 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1706 lmac_id, true); 1707 } 1708 1709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 1710 /* Do SMQ flush and set enqueue xoff */ 1711 cfg |= BIT_ULL(50) | BIT_ULL(49); 1712 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 1713 1714 /* Disable backpressure from physical link, 1715 * otherwise SMQ flush may stall. 
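	 * Rx backpressure is re-enabled below once the flush poll completes.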
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}

static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
1777 */ 1778 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1779 continue; 1780 1781 txsch = &nix_hw->txsch[lvl]; 1782 for (schq = 0; schq < txsch->schq.max; schq++) { 1783 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1784 continue; 1785 rvu_free_rsrc(&txsch->schq, schq); 1786 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1787 } 1788 } 1789 mutex_unlock(&rvu->rsrc_lock); 1790 1791 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 1792 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 1793 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 1794 if (err) 1795 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 1796 1797 return 0; 1798 } 1799 1800 static int nix_txschq_free_one(struct rvu *rvu, 1801 struct nix_txsch_free_req *req) 1802 { 1803 struct rvu_hwinfo *hw = rvu->hw; 1804 u16 pcifunc = req->hdr.pcifunc; 1805 int lvl, schq, nixlf, blkaddr; 1806 struct nix_txsch *txsch; 1807 struct nix_hw *nix_hw; 1808 u32 *pfvf_map; 1809 1810 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1811 if (blkaddr < 0) 1812 return NIX_AF_ERR_AF_LF_INVALID; 1813 1814 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1815 if (!nix_hw) 1816 return -EINVAL; 1817 1818 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1819 if (nixlf < 0) 1820 return NIX_AF_ERR_AF_LF_INVALID; 1821 1822 lvl = req->schq_lvl; 1823 schq = req->schq; 1824 txsch = &nix_hw->txsch[lvl]; 1825 1826 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 1827 return 0; 1828 1829 pfvf_map = txsch->pfvf_map; 1830 mutex_lock(&rvu->rsrc_lock); 1831 1832 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 1833 mutex_unlock(&rvu->rsrc_lock); 1834 goto err; 1835 } 1836 1837 /* Flush if it is a SMQ. Onus of disabling 1838 * TL2/3 queue links before SMQ flush is on user 1839 */ 1840 if (lvl == NIX_TXSCH_LVL_SMQ) 1841 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1842 1843 /* Free the resource */ 1844 rvu_free_rsrc(&txsch->schq, schq); 1845 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1846 mutex_unlock(&rvu->rsrc_lock); 1847 return 0; 1848 err: 1849 return NIX_AF_ERR_TLX_INVALID; 1850 } 1851 1852 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 1853 struct nix_txsch_free_req *req, 1854 struct msg_rsp *rsp) 1855 { 1856 if (req->flags & TXSCHQ_FREE_ALL) 1857 return nix_txschq_free(rvu, req->hdr.pcifunc); 1858 else 1859 return nix_txschq_free_one(rvu, req); 1860 } 1861 1862 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 1863 int lvl, u64 reg, u64 regval) 1864 { 1865 u64 regbase = reg & 0xFFFF; 1866 u16 schq, parent; 1867 1868 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 1869 return false; 1870 1871 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1872 /* Check if this schq belongs to this PF/VF or not */ 1873 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 1874 return false; 1875 1876 parent = (regval >> 16) & 0x1FF; 1877 /* Validate MDQ's TL4 parent */ 1878 if (regbase == NIX_AF_MDQX_PARENT(0) && 1879 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 1880 return false; 1881 1882 /* Validate TL4's TL3 parent */ 1883 if (regbase == NIX_AF_TL4X_PARENT(0) && 1884 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 1885 return false; 1886 1887 /* Validate TL3's TL2 parent */ 1888 if (regbase == NIX_AF_TL3X_PARENT(0) && 1889 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 1890 return false; 1891 1892 /* Validate TL2's TL1 parent */ 1893 if (regbase == NIX_AF_TL2X_PARENT(0) && 
1894 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 1895 return false; 1896 1897 return true; 1898 } 1899 1900 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 1901 { 1902 u64 regbase; 1903 1904 if (hw->cap.nix_shaping) 1905 return true; 1906 1907 /* If shaping and coloring is not supported, then 1908 * *_CIR and *_PIR registers should not be configured. 1909 */ 1910 regbase = reg & 0xFFFF; 1911 1912 switch (lvl) { 1913 case NIX_TXSCH_LVL_TL1: 1914 if (regbase == NIX_AF_TL1X_CIR(0)) 1915 return false; 1916 break; 1917 case NIX_TXSCH_LVL_TL2: 1918 if (regbase == NIX_AF_TL2X_CIR(0) || 1919 regbase == NIX_AF_TL2X_PIR(0)) 1920 return false; 1921 break; 1922 case NIX_TXSCH_LVL_TL3: 1923 if (regbase == NIX_AF_TL3X_CIR(0) || 1924 regbase == NIX_AF_TL3X_PIR(0)) 1925 return false; 1926 break; 1927 case NIX_TXSCH_LVL_TL4: 1928 if (regbase == NIX_AF_TL4X_CIR(0) || 1929 regbase == NIX_AF_TL4X_PIR(0)) 1930 return false; 1931 break; 1932 } 1933 return true; 1934 } 1935 1936 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 1937 u16 pcifunc, int blkaddr) 1938 { 1939 u32 *pfvf_map; 1940 int schq; 1941 1942 schq = nix_get_tx_link(rvu, pcifunc); 1943 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 1944 /* Skip if PF has already done the config */ 1945 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 1946 return; 1947 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 1948 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 1949 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 1950 TXSCH_TL1_DFLT_RR_QTM); 1951 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 1952 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 1953 } 1954 1955 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 1956 struct nix_txschq_config *req, 1957 struct msg_rsp *rsp) 1958 { 1959 struct rvu_hwinfo *hw = rvu->hw; 1960 u16 pcifunc = req->hdr.pcifunc; 1961 u64 reg, regval, schq_regbase; 1962 struct nix_txsch *txsch; 1963 struct nix_hw *nix_hw; 1964 int blkaddr, idx, err; 1965 int nixlf, schq; 1966 u32 *pfvf_map; 1967 1968 if (req->lvl >= NIX_TXSCH_LVL_CNT || 1969 req->num_regs > MAX_REGS_PER_MBOX_MSG) 1970 return NIX_AF_INVAL_TXSCHQ_CFG; 1971 1972 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 1973 if (err) 1974 return err; 1975 1976 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1977 if (!nix_hw) 1978 return -EINVAL; 1979 1980 txsch = &nix_hw->txsch[req->lvl]; 1981 pfvf_map = txsch->pfvf_map; 1982 1983 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 1984 pcifunc & RVU_PFVF_FUNC_MASK) { 1985 mutex_lock(&rvu->rsrc_lock); 1986 if (req->lvl == NIX_TXSCH_LVL_TL1) 1987 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 1988 mutex_unlock(&rvu->rsrc_lock); 1989 return 0; 1990 } 1991 1992 for (idx = 0; idx < req->num_regs; idx++) { 1993 reg = req->reg[idx]; 1994 regval = req->regval[idx]; 1995 schq_regbase = reg & 0xFFFF; 1996 1997 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 1998 txsch->lvl, reg, regval)) 1999 return NIX_AF_INVAL_TXSCHQ_CFG; 2000 2001 /* Check if shaping and coloring is supported */ 2002 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2003 continue; 2004 2005 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2006 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2007 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2008 pcifunc, 0); 2009 regval &= ~(0x7FULL << 24); 2010 regval |= ((u64)nixlf << 24); 2011 } 2012 2013 /* Clear 'BP_ENA' config, if it's not allowed */ 2014 if (!hw->cap.nix_tx_link_bp) { 2015 if (schq_regbase == 
NIX_AF_TL4X_SDP_LINK_CFG(0) || 2016 (schq_regbase & 0xFF00) == 2017 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2018 regval &= ~BIT_ULL(13); 2019 } 2020 2021 /* Mark config as done for TL1 by PF */ 2022 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2023 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2024 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2025 mutex_lock(&rvu->rsrc_lock); 2026 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2027 NIX_TXSCHQ_CFG_DONE); 2028 mutex_unlock(&rvu->rsrc_lock); 2029 } 2030 2031 /* SMQ flush is special hence split register writes such 2032 * that flush first and write rest of the bits later. 2033 */ 2034 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2035 (regval & BIT_ULL(49))) { 2036 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2037 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2038 regval &= ~BIT_ULL(49); 2039 } 2040 rvu_write64(rvu, blkaddr, reg, regval); 2041 } 2042 2043 return 0; 2044 } 2045 2046 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2047 struct nix_vtag_config *req) 2048 { 2049 u64 regval = req->vtag_size; 2050 2051 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2052 req->vtag_size > VTAGSIZE_T8) 2053 return -EINVAL; 2054 2055 /* RX VTAG Type 7 reserved for vf vlan */ 2056 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2057 return NIX_AF_ERR_RX_VTAG_INUSE; 2058 2059 if (req->rx.capture_vtag) 2060 regval |= BIT_ULL(5); 2061 if (req->rx.strip_vtag) 2062 regval |= BIT_ULL(4); 2063 2064 rvu_write64(rvu, blkaddr, 2065 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2066 return 0; 2067 } 2068 2069 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2070 u16 pcifunc, int index) 2071 { 2072 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2073 struct nix_txvlan *vlan = &nix_hw->txvlan; 2074 2075 if (vlan->entry2pfvf_map[index] != pcifunc) 2076 return NIX_AF_ERR_PARAM; 2077 2078 rvu_write64(rvu, blkaddr, 2079 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2080 rvu_write64(rvu, blkaddr, 2081 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2082 2083 vlan->entry2pfvf_map[index] = 0; 2084 rvu_free_rsrc(&vlan->rsrc, index); 2085 2086 return 0; 2087 } 2088 2089 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2090 { 2091 struct nix_txvlan *vlan; 2092 struct nix_hw *nix_hw; 2093 int index, blkaddr; 2094 2095 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2096 if (blkaddr < 0) 2097 return; 2098 2099 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2100 vlan = &nix_hw->txvlan; 2101 2102 mutex_lock(&vlan->rsrc_lock); 2103 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2104 for (index = 0; index < vlan->rsrc.max; index++) { 2105 if (vlan->entry2pfvf_map[index] == pcifunc) 2106 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2107 } 2108 mutex_unlock(&vlan->rsrc_lock); 2109 } 2110 2111 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2112 u64 vtag, u8 size) 2113 { 2114 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2115 struct nix_txvlan *vlan = &nix_hw->txvlan; 2116 u64 regval; 2117 int index; 2118 2119 mutex_lock(&vlan->rsrc_lock); 2120 2121 index = rvu_alloc_rsrc(&vlan->rsrc); 2122 if (index < 0) { 2123 mutex_unlock(&vlan->rsrc_lock); 2124 return index; 2125 } 2126 2127 mutex_unlock(&vlan->rsrc_lock); 2128 2129 regval = size ? 
vtag : vtag << 32; 2130 2131 rvu_write64(rvu, blkaddr, 2132 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2133 rvu_write64(rvu, blkaddr, 2134 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2135 2136 return index; 2137 } 2138 2139 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2140 struct nix_vtag_config *req) 2141 { 2142 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2143 struct nix_txvlan *vlan = &nix_hw->txvlan; 2144 u16 pcifunc = req->hdr.pcifunc; 2145 int idx0 = req->tx.vtag0_idx; 2146 int idx1 = req->tx.vtag1_idx; 2147 int err = 0; 2148 2149 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2150 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2151 vlan->entry2pfvf_map[idx1] != pcifunc) 2152 return NIX_AF_ERR_PARAM; 2153 2154 mutex_lock(&vlan->rsrc_lock); 2155 2156 if (req->tx.free_vtag0) { 2157 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2158 if (err) 2159 goto exit; 2160 } 2161 2162 if (req->tx.free_vtag1) 2163 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2164 2165 exit: 2166 mutex_unlock(&vlan->rsrc_lock); 2167 return err; 2168 } 2169 2170 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2171 struct nix_vtag_config *req, 2172 struct nix_vtag_config_rsp *rsp) 2173 { 2174 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2175 struct nix_txvlan *vlan = &nix_hw->txvlan; 2176 u16 pcifunc = req->hdr.pcifunc; 2177 2178 if (req->tx.cfg_vtag0) { 2179 rsp->vtag0_idx = 2180 nix_tx_vtag_alloc(rvu, blkaddr, 2181 req->tx.vtag0, req->vtag_size); 2182 2183 if (rsp->vtag0_idx < 0) 2184 return NIX_AF_ERR_TX_VTAG_NOSPC; 2185 2186 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2187 } 2188 2189 if (req->tx.cfg_vtag1) { 2190 rsp->vtag1_idx = 2191 nix_tx_vtag_alloc(rvu, blkaddr, 2192 req->tx.vtag1, req->vtag_size); 2193 2194 if (rsp->vtag1_idx < 0) 2195 goto err_free; 2196 2197 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2198 } 2199 2200 return 0; 2201 2202 err_free: 2203 if (req->tx.cfg_vtag0) 2204 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2205 2206 return NIX_AF_ERR_TX_VTAG_NOSPC; 2207 } 2208 2209 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2210 struct nix_vtag_config *req, 2211 struct nix_vtag_config_rsp *rsp) 2212 { 2213 u16 pcifunc = req->hdr.pcifunc; 2214 int blkaddr, nixlf, err; 2215 2216 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2217 if (err) 2218 return err; 2219 2220 if (req->cfg_type) { 2221 /* rx vtag configuration */ 2222 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2223 if (err) 2224 return NIX_AF_ERR_PARAM; 2225 } else { 2226 /* tx vtag configuration */ 2227 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2228 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2229 return NIX_AF_ERR_PARAM; 2230 2231 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2232 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2233 2234 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2235 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2236 } 2237 2238 return 0; 2239 } 2240 2241 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2242 int mce, u8 op, u16 pcifunc, int next, bool eol) 2243 { 2244 struct nix_aq_enq_req aq_req; 2245 int err; 2246 2247 aq_req.hdr.pcifunc = 0; 2248 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2249 aq_req.op = op; 2250 aq_req.qidx = mce; 2251 2252 /* Use RSS with RSS index 0 */ 2253 aq_req.mce.op = 1; 2254 aq_req.mce.index = 0; 2255 aq_req.mce.eol = eol; 2256 aq_req.mce.pf_func = pcifunc; 2257 aq_req.mce.next = next; 2258 2259 /* All fields valid */ 2260 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2261 2262 err = rvu_nix_blk_aq_enq_inst(rvu, 
nix_hw, &aq_req, NULL); 2263 if (err) { 2264 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2265 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2266 return err; 2267 } 2268 return 0; 2269 } 2270 2271 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2272 u16 pcifunc, bool add) 2273 { 2274 struct mce *mce, *tail = NULL; 2275 bool delete = false; 2276 2277 /* Scan through the current list */ 2278 hlist_for_each_entry(mce, &mce_list->head, node) { 2279 /* If already exists, then delete */ 2280 if (mce->pcifunc == pcifunc && !add) { 2281 delete = true; 2282 break; 2283 } else if (mce->pcifunc == pcifunc && add) { 2284 /* entry already exists */ 2285 return 0; 2286 } 2287 tail = mce; 2288 } 2289 2290 if (delete) { 2291 hlist_del(&mce->node); 2292 kfree(mce); 2293 mce_list->count--; 2294 return 0; 2295 } 2296 2297 if (!add) 2298 return 0; 2299 2300 /* Add a new one to the list, at the tail */ 2301 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2302 if (!mce) 2303 return -ENOMEM; 2304 mce->pcifunc = pcifunc; 2305 if (!tail) 2306 hlist_add_head(&mce->node, &mce_list->head); 2307 else 2308 hlist_add_behind(&mce->node, &tail->node); 2309 mce_list->count++; 2310 return 0; 2311 } 2312 2313 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 2314 struct nix_mce_list *mce_list, 2315 int mce_idx, int mcam_index, bool add) 2316 { 2317 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 2318 struct npc_mcam *mcam = &rvu->hw->mcam; 2319 struct nix_mcast *mcast; 2320 struct nix_hw *nix_hw; 2321 struct mce *mce; 2322 2323 if (!mce_list) 2324 return -EINVAL; 2325 2326 /* Get this PF/VF func's MCE index */ 2327 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2328 2329 if (idx > (mce_idx + mce_list->max)) { 2330 dev_err(rvu->dev, 2331 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2332 __func__, idx, mce_list->max, 2333 pcifunc >> RVU_PFVF_PF_SHIFT); 2334 return -EINVAL; 2335 } 2336 2337 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 2338 if (err) 2339 return err; 2340 2341 mcast = &nix_hw->mcast; 2342 mutex_lock(&mcast->mce_lock); 2343 2344 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 2345 if (err) 2346 goto end; 2347 2348 /* Disable MCAM entry in NPC */ 2349 if (!mce_list->count) { 2350 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2351 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 2352 goto end; 2353 } 2354 2355 /* Dump the updated list to HW */ 2356 idx = mce_idx; 2357 last_idx = idx + mce_list->count - 1; 2358 hlist_for_each_entry(mce, &mce_list->head, node) { 2359 if (idx > last_idx) 2360 break; 2361 2362 next_idx = idx + 1; 2363 /* EOL should be set in last MCE */ 2364 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2365 mce->pcifunc, next_idx, 2366 (next_idx > last_idx) ? 
true : false); 2367 if (err) 2368 goto end; 2369 idx++; 2370 } 2371 2372 end: 2373 mutex_unlock(&mcast->mce_lock); 2374 return err; 2375 } 2376 2377 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 2378 struct nix_mce_list **mce_list, int *mce_idx) 2379 { 2380 struct rvu_hwinfo *hw = rvu->hw; 2381 struct rvu_pfvf *pfvf; 2382 2383 if (!hw->cap.nix_rx_multicast || 2384 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 2385 *mce_list = NULL; 2386 *mce_idx = 0; 2387 return; 2388 } 2389 2390 /* Get this PF/VF func's MCE index */ 2391 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2392 2393 if (type == NIXLF_BCAST_ENTRY) { 2394 *mce_list = &pfvf->bcast_mce_list; 2395 *mce_idx = pfvf->bcast_mce_idx; 2396 } else if (type == NIXLF_ALLMULTI_ENTRY) { 2397 *mce_list = &pfvf->mcast_mce_list; 2398 *mce_idx = pfvf->mcast_mce_idx; 2399 } else if (type == NIXLF_PROMISC_ENTRY) { 2400 *mce_list = &pfvf->promisc_mce_list; 2401 *mce_idx = pfvf->promisc_mce_idx; 2402 } else { 2403 *mce_list = NULL; 2404 *mce_idx = 0; 2405 } 2406 } 2407 2408 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 2409 int type, bool add) 2410 { 2411 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 2412 struct npc_mcam *mcam = &rvu->hw->mcam; 2413 struct rvu_hwinfo *hw = rvu->hw; 2414 struct nix_mce_list *mce_list; 2415 2416 /* skip multicast pkt replication for AF's VFs */ 2417 if (is_afvf(pcifunc)) 2418 return 0; 2419 2420 if (!hw->cap.nix_rx_multicast) 2421 return 0; 2422 2423 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2424 if (blkaddr < 0) 2425 return -EINVAL; 2426 2427 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2428 if (nixlf < 0) 2429 return -EINVAL; 2430 2431 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 2432 2433 mcam_index = npc_get_nixlf_mcam_index(mcam, 2434 pcifunc & ~RVU_PFVF_FUNC_MASK, 2435 nixlf, type); 2436 err = nix_update_mce_list(rvu, pcifunc, mce_list, 2437 mce_idx, mcam_index, add); 2438 return err; 2439 } 2440 2441 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2442 { 2443 struct nix_mcast *mcast = &nix_hw->mcast; 2444 int err, pf, numvfs, idx; 2445 struct rvu_pfvf *pfvf; 2446 u16 pcifunc; 2447 u64 cfg; 2448 2449 /* Skip PF0 (i.e AF) */ 2450 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2451 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2452 /* If PF is not enabled, nothing to do */ 2453 if (!((cfg >> 20) & 0x01)) 2454 continue; 2455 /* Get numVFs attached to this PF */ 2456 numvfs = (cfg >> 12) & 0xFF; 2457 2458 pfvf = &rvu->pf[pf]; 2459 2460 /* This NIX0/1 block mapped to PF ? */ 2461 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2462 continue; 2463 2464 /* save start idx of broadcast mce list */ 2465 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2466 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2467 2468 /* save start idx of multicast mce list */ 2469 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2470 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 2471 2472 /* save the start idx of promisc mce list */ 2473 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2474 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 2475 2476 for (idx = 0; idx < (numvfs + 1); idx++) { 2477 /* idx-0 is for PF, followed by VFs */ 2478 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2479 pcifunc |= idx; 2480 /* Add dummy entries now, so that we don't have to check 2481 * for whether AQ_OP should be INIT/WRITE later on. 
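 * (using INIT here means nix_update_mce_list() can later always use the
 * WRITE op on an already-initialized MCE context.)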
2482 * Will be updated when a NIXLF is attached/detached to 2483 * these PF/VFs. 2484 */ 2485 err = nix_blk_setup_mce(rvu, nix_hw, 2486 pfvf->bcast_mce_idx + idx, 2487 NIX_AQ_INSTOP_INIT, 2488 pcifunc, 0, true); 2489 if (err) 2490 return err; 2491 2492 /* add dummy entries to multicast mce list */ 2493 err = nix_blk_setup_mce(rvu, nix_hw, 2494 pfvf->mcast_mce_idx + idx, 2495 NIX_AQ_INSTOP_INIT, 2496 pcifunc, 0, true); 2497 if (err) 2498 return err; 2499 2500 /* add dummy entries to promisc mce list */ 2501 err = nix_blk_setup_mce(rvu, nix_hw, 2502 pfvf->promisc_mce_idx + idx, 2503 NIX_AQ_INSTOP_INIT, 2504 pcifunc, 0, true); 2505 if (err) 2506 return err; 2507 } 2508 } 2509 return 0; 2510 } 2511 2512 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2513 { 2514 struct nix_mcast *mcast = &nix_hw->mcast; 2515 struct rvu_hwinfo *hw = rvu->hw; 2516 int err, size; 2517 2518 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2519 size = (1ULL << size); 2520 2521 /* Alloc memory for multicast/mirror replication entries */ 2522 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2523 (256UL << MC_TBL_SIZE), size); 2524 if (err) 2525 return -ENOMEM; 2526 2527 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2528 (u64)mcast->mce_ctx->iova); 2529 2530 /* Set max list length equal to max no of VFs per PF + PF itself */ 2531 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2532 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2533 2534 /* Alloc memory for multicast replication buffers */ 2535 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2536 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2537 (8UL << MC_BUF_CNT), size); 2538 if (err) 2539 return -ENOMEM; 2540 2541 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2542 (u64)mcast->mcast_buf->iova); 2543 2544 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2545 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2546 2547 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2548 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2549 BIT_ULL(20) | MC_BUF_CNT); 2550 2551 mutex_init(&mcast->mce_lock); 2552 2553 return nix_setup_mce_tables(rvu, nix_hw); 2554 } 2555 2556 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 2557 { 2558 struct nix_txvlan *vlan = &nix_hw->txvlan; 2559 int err; 2560 2561 /* Allocate resource bimap for tx vtag def registers*/ 2562 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 2563 err = rvu_alloc_bitmap(&vlan->rsrc); 2564 if (err) 2565 return -ENOMEM; 2566 2567 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 2568 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 2569 sizeof(u16), GFP_KERNEL); 2570 if (!vlan->entry2pfvf_map) 2571 goto free_mem; 2572 2573 mutex_init(&vlan->rsrc_lock); 2574 return 0; 2575 2576 free_mem: 2577 kfree(vlan->rsrc.bmap); 2578 return -ENOMEM; 2579 } 2580 2581 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2582 { 2583 struct nix_txsch *txsch; 2584 int err, lvl, schq; 2585 u64 cfg, reg; 2586 2587 /* Get scheduler queue count of each type and alloc 2588 * bitmap for each for alloc/free/attach operations. 
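 * (per-level queue counts are read from the NIX_AF_*_CONST registers in
 * the switch below and stored in txsch->schq.max).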
2589 */ 2590 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2591 txsch = &nix_hw->txsch[lvl]; 2592 txsch->lvl = lvl; 2593 switch (lvl) { 2594 case NIX_TXSCH_LVL_SMQ: 2595 reg = NIX_AF_MDQ_CONST; 2596 break; 2597 case NIX_TXSCH_LVL_TL4: 2598 reg = NIX_AF_TL4_CONST; 2599 break; 2600 case NIX_TXSCH_LVL_TL3: 2601 reg = NIX_AF_TL3_CONST; 2602 break; 2603 case NIX_TXSCH_LVL_TL2: 2604 reg = NIX_AF_TL2_CONST; 2605 break; 2606 case NIX_TXSCH_LVL_TL1: 2607 reg = NIX_AF_TL1_CONST; 2608 break; 2609 } 2610 cfg = rvu_read64(rvu, blkaddr, reg); 2611 txsch->schq.max = cfg & 0xFFFF; 2612 err = rvu_alloc_bitmap(&txsch->schq); 2613 if (err) 2614 return err; 2615 2616 /* Allocate memory for scheduler queues to 2617 * PF/VF pcifunc mapping info. 2618 */ 2619 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 2620 sizeof(u32), GFP_KERNEL); 2621 if (!txsch->pfvf_map) 2622 return -ENOMEM; 2623 for (schq = 0; schq < txsch->schq.max; schq++) 2624 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2625 } 2626 return 0; 2627 } 2628 2629 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 2630 int blkaddr, u32 cfg) 2631 { 2632 int fmt_idx; 2633 2634 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 2635 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 2636 return fmt_idx; 2637 } 2638 if (fmt_idx >= nix_hw->mark_format.total) 2639 return -ERANGE; 2640 2641 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 2642 nix_hw->mark_format.cfg[fmt_idx] = cfg; 2643 nix_hw->mark_format.in_use++; 2644 return fmt_idx; 2645 } 2646 2647 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 2648 int blkaddr) 2649 { 2650 u64 cfgs[] = { 2651 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 2652 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 2653 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 2654 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 2655 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 2656 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 2657 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 2658 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 2659 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 2660 }; 2661 int i, rc; 2662 u64 total; 2663 2664 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 2665 nix_hw->mark_format.total = (u8)total; 2666 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 2667 GFP_KERNEL); 2668 if (!nix_hw->mark_format.cfg) 2669 return -ENOMEM; 2670 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 2671 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 2672 if (rc < 0) 2673 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 2674 i, rc); 2675 } 2676 2677 return 0; 2678 } 2679 2680 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2681 { 2682 /* CN10K supports LBK FIFO size 72 KB */ 2683 if (rvu->hw->lbk_bufsize == 0x12000) 2684 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 2685 else 2686 *max_mtu = NIC_HW_MAX_FRS; 2687 } 2688 2689 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2690 { 2691 /* RPM supports FIFO len 128 KB */ 2692 if (rvu_cgx_get_fifolen(rvu) == 0x20000) 2693 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 2694 else 2695 *max_mtu = NIC_HW_MAX_FRS; 2696 } 2697 2698 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 2699 struct nix_hw_info *rsp) 2700 { 2701 u16 pcifunc = req->hdr.pcifunc; 2702 int blkaddr; 2703 2704 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2705 if (blkaddr < 0) 2706 return NIX_AF_ERR_AF_LF_INVALID; 2707 2708 if (is_afvf(pcifunc)) 2709 
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2710 else
2711 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2712
2713 rsp->min_mtu = NIC_HW_MIN_FRS;
2714 return 0;
2715 }
2716
2717 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2718 struct msg_rsp *rsp)
2719 {
2720 u16 pcifunc = req->hdr.pcifunc;
2721 int i, nixlf, blkaddr, err;
2722 u64 stats;
2723
2724 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2725 if (err)
2726 return err;
2727
2728 /* Get stats count supported by HW */
2729 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2730
2731 /* Reset tx stats */
2732 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2733 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2734
2735 /* Reset rx stats */
2736 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2737 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2738
2739 return 0;
2740 }
2741
2742 /* Returns the ALG index to be set into NPC_RX_ACTION */
2743 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2744 {
2745 int i;
2746
2747 /* Scan over existing algo entries to find a match */
2748 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2749 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2750 return i;
2751
2752 return -ERANGE;
2753 }
2754
2755 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2756 {
2757 int idx, nr_field, key_off, field_marker, keyoff_marker;
2758 int max_key_off, max_bit_pos, group_member;
2759 struct nix_rx_flowkey_alg *field;
2760 struct nix_rx_flowkey_alg tmp;
2761 u32 key_type, valid_key;
2762 int l4_key_offset = 0;
2763
2764 if (!alg)
2765 return -EINVAL;
2766
2767 #define FIELDS_PER_ALG 5
2768 #define MAX_KEY_OFF 40
2769 /* Clear all fields */
2770 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2771
2772 /* Each of the 32 possible flow key algorithm definitions should
2773 * fall into above incremental config (except ALG0). Otherwise a
2774 * single NPC MCAM entry is not sufficient for supporting RSS.
2775 *
2776 * If a different definition or combination is needed then NPC MCAM
2777 * has to be programmed to filter such pkts and its action should
2778 * point to this definition to calculate flowtag or hash.
2779 *
2780 * The `for loop` goes over _all_ protocol fields and the following
2781 * variables depict the state machine forward progress logic.
2782 *
2783 * keyoff_marker - Enabled when hash byte length needs to be accounted
2784 * in field->key_offset update.
2785 * field_marker - Enabled when a new field needs to be selected.
2786 * group_member - Enabled when protocol is part of a group.
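 * For example, TCP/UDP/SCTP share a single Sport+Dport field and
 * VXLAN/GENEVE share a single field, so the shared field is emitted
 * only once for the whole group.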
2787 */ 2788 2789 keyoff_marker = 0; max_key_off = 0; group_member = 0; 2790 nr_field = 0; key_off = 0; field_marker = 1; 2791 field = &tmp; max_bit_pos = fls(flow_cfg); 2792 for (idx = 0; 2793 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 2794 key_off < MAX_KEY_OFF; idx++) { 2795 key_type = BIT(idx); 2796 valid_key = flow_cfg & key_type; 2797 /* Found a field marker, reset the field values */ 2798 if (field_marker) 2799 memset(&tmp, 0, sizeof(tmp)); 2800 2801 field_marker = true; 2802 keyoff_marker = true; 2803 switch (key_type) { 2804 case NIX_FLOW_KEY_TYPE_PORT: 2805 field->sel_chan = true; 2806 /* This should be set to 1, when SEL_CHAN is set */ 2807 field->bytesm1 = 1; 2808 break; 2809 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 2810 field->lid = NPC_LID_LC; 2811 field->hdr_offset = 9; /* offset */ 2812 field->bytesm1 = 0; /* 1 byte */ 2813 field->ltype_match = NPC_LT_LC_IP; 2814 field->ltype_mask = 0xF; 2815 break; 2816 case NIX_FLOW_KEY_TYPE_IPV4: 2817 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 2818 field->lid = NPC_LID_LC; 2819 field->ltype_match = NPC_LT_LC_IP; 2820 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 2821 field->lid = NPC_LID_LG; 2822 field->ltype_match = NPC_LT_LG_TU_IP; 2823 } 2824 field->hdr_offset = 12; /* SIP offset */ 2825 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 2826 field->ltype_mask = 0xF; /* Match only IPv4 */ 2827 keyoff_marker = false; 2828 break; 2829 case NIX_FLOW_KEY_TYPE_IPV6: 2830 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 2831 field->lid = NPC_LID_LC; 2832 field->ltype_match = NPC_LT_LC_IP6; 2833 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 2834 field->lid = NPC_LID_LG; 2835 field->ltype_match = NPC_LT_LG_TU_IP6; 2836 } 2837 field->hdr_offset = 8; /* SIP offset */ 2838 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 2839 field->ltype_mask = 0xF; /* Match only IPv6 */ 2840 break; 2841 case NIX_FLOW_KEY_TYPE_TCP: 2842 case NIX_FLOW_KEY_TYPE_UDP: 2843 case NIX_FLOW_KEY_TYPE_SCTP: 2844 case NIX_FLOW_KEY_TYPE_INNR_TCP: 2845 case NIX_FLOW_KEY_TYPE_INNR_UDP: 2846 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 2847 field->lid = NPC_LID_LD; 2848 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 2849 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 2850 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 2851 field->lid = NPC_LID_LH; 2852 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 2853 2854 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 2855 * so no need to change the ltype_match, just change 2856 * the lid for inner protocols 2857 */ 2858 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 2859 (int)NPC_LT_LH_TU_TCP); 2860 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 2861 (int)NPC_LT_LH_TU_UDP); 2862 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 2863 (int)NPC_LT_LH_TU_SCTP); 2864 2865 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 2866 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 2867 valid_key) { 2868 field->ltype_match |= NPC_LT_LD_TCP; 2869 group_member = true; 2870 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 2871 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 2872 valid_key) { 2873 field->ltype_match |= NPC_LT_LD_UDP; 2874 group_member = true; 2875 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 2876 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 2877 valid_key) { 2878 field->ltype_match |= NPC_LT_LD_SCTP; 2879 group_member = true; 2880 } 2881 field->ltype_mask = ~field->ltype_match; 2882 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 2883 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 2884 /* Handle the case where any of the group item 2885 * is enabled in the group but not the final one 2886 */ 2887 if (group_member) { 2888 valid_key = true; 
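/* Emit the field accumulated for the whole group below even when
 * SCTP itself was not requested in flow_cfg.
 */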
2889 group_member = false; 2890 } 2891 } else { 2892 field_marker = false; 2893 keyoff_marker = false; 2894 } 2895 2896 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 2897 * remember the TCP key offset of 40 byte hash key. 2898 */ 2899 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 2900 l4_key_offset = key_off; 2901 break; 2902 case NIX_FLOW_KEY_TYPE_NVGRE: 2903 field->lid = NPC_LID_LD; 2904 field->hdr_offset = 4; /* VSID offset */ 2905 field->bytesm1 = 2; 2906 field->ltype_match = NPC_LT_LD_NVGRE; 2907 field->ltype_mask = 0xF; 2908 break; 2909 case NIX_FLOW_KEY_TYPE_VXLAN: 2910 case NIX_FLOW_KEY_TYPE_GENEVE: 2911 field->lid = NPC_LID_LE; 2912 field->bytesm1 = 2; 2913 field->hdr_offset = 4; 2914 field->ltype_mask = 0xF; 2915 field_marker = false; 2916 keyoff_marker = false; 2917 2918 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 2919 field->ltype_match |= NPC_LT_LE_VXLAN; 2920 group_member = true; 2921 } 2922 2923 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 2924 field->ltype_match |= NPC_LT_LE_GENEVE; 2925 group_member = true; 2926 } 2927 2928 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 2929 if (group_member) { 2930 field->ltype_mask = ~field->ltype_match; 2931 field_marker = true; 2932 keyoff_marker = true; 2933 valid_key = true; 2934 group_member = false; 2935 } 2936 } 2937 break; 2938 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 2939 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 2940 field->lid = NPC_LID_LA; 2941 field->ltype_match = NPC_LT_LA_ETHER; 2942 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 2943 field->lid = NPC_LID_LF; 2944 field->ltype_match = NPC_LT_LF_TU_ETHER; 2945 } 2946 field->hdr_offset = 0; 2947 field->bytesm1 = 5; /* DMAC 6 Byte */ 2948 field->ltype_mask = 0xF; 2949 break; 2950 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 2951 field->lid = NPC_LID_LC; 2952 field->hdr_offset = 40; /* IPV6 hdr */ 2953 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 2954 field->ltype_match = NPC_LT_LC_IP6_EXT; 2955 field->ltype_mask = 0xF; 2956 break; 2957 case NIX_FLOW_KEY_TYPE_GTPU: 2958 field->lid = NPC_LID_LE; 2959 field->hdr_offset = 4; 2960 field->bytesm1 = 3; /* 4 bytes TID*/ 2961 field->ltype_match = NPC_LT_LE_GTPU; 2962 field->ltype_mask = 0xF; 2963 break; 2964 case NIX_FLOW_KEY_TYPE_VLAN: 2965 field->lid = NPC_LID_LB; 2966 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 2967 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 2968 field->ltype_match = NPC_LT_LB_CTAG; 2969 field->ltype_mask = 0xF; 2970 field->fn_mask = 1; /* Mask out the first nibble */ 2971 break; 2972 case NIX_FLOW_KEY_TYPE_AH: 2973 case NIX_FLOW_KEY_TYPE_ESP: 2974 field->hdr_offset = 0; 2975 field->bytesm1 = 7; /* SPI + sequence number */ 2976 field->ltype_mask = 0xF; 2977 field->lid = NPC_LID_LE; 2978 field->ltype_match = NPC_LT_LE_ESP; 2979 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 2980 field->lid = NPC_LID_LD; 2981 field->ltype_match = NPC_LT_LD_AH; 2982 field->hdr_offset = 4; 2983 keyoff_marker = false; 2984 } 2985 break; 2986 } 2987 field->ena = 1; 2988 2989 /* Found a valid flow key type */ 2990 if (valid_key) { 2991 /* Use the key offset of TCP/UDP/SCTP fields 2992 * for ESP/AH fields. 
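 * (l4_key_offset was recorded above when the TCP key type was processed,
 * so ESP/AH reuse that same region of the 40-byte hash key).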
2993 */
2994 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2995 key_type == NIX_FLOW_KEY_TYPE_AH)
2996 key_off = l4_key_offset;
2997 field->key_offset = key_off;
2998 memcpy(&alg[nr_field], field, sizeof(*field));
2999 max_key_off = max(max_key_off, field->bytesm1 + 1);
3000
3001 /* Found a field marker, get the next field */
3002 if (field_marker)
3003 nr_field++;
3004 }
3005
3006 /* Found a keyoff marker, update the new key_off */
3007 if (keyoff_marker) {
3008 key_off += max_key_off;
3009 max_key_off = 0;
3010 }
3011 }
3012 /* Processed all the flow key types */
3013 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3014 return 0;
3015 else
3016 return NIX_AF_ERR_RSS_NOSPC_FIELD;
3017 }
3018
3019 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3020 {
3021 u64 field[FIELDS_PER_ALG];
3022 struct nix_hw *hw;
3023 int fid, rc;
3024
3025 hw = get_nix_hw(rvu->hw, blkaddr);
3026 if (!hw)
3027 return -EINVAL;
3028
3029 /* No room to add new flow hash algorithm */
3030 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3031 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3032
3033 /* Generate algo fields for the given flow_cfg */
3034 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3035 if (rc)
3036 return rc;
3037
3038 /* Update ALGX_FIELDX register with generated fields */
3039 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3040 rvu_write64(rvu, blkaddr,
3041 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3042 fid), field[fid]);
3043
3044 /* Store the flow_cfg for further lookup */
3045 rc = hw->flowkey.in_use;
3046 hw->flowkey.flowkey[rc] = flow_cfg;
3047 hw->flowkey.in_use++;
3048
3049 return rc;
3050 }
3051
3052 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3053 struct nix_rss_flowkey_cfg *req,
3054 struct nix_rss_flowkey_cfg_rsp *rsp)
3055 {
3056 u16 pcifunc = req->hdr.pcifunc;
3057 int alg_idx, nixlf, blkaddr;
3058 struct nix_hw *nix_hw;
3059 int err;
3060
3061 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3062 if (err)
3063 return err;
3064
3065 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3066 if (!nix_hw)
3067 return -EINVAL;
3068
3069 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3070 /* Failed to get algo index from the existing list, reserve new */
3071 if (alg_idx < 0) {
3072 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3073 req->flowkey_cfg);
3074 if (alg_idx < 0)
3075 return alg_idx;
3076 }
3077 rsp->alg_idx = alg_idx;
3078 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3079 alg_idx, req->mcam_index);
3080 return 0;
3081 }
3082
3083 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3084 {
3085 u32 flowkey_cfg, minkey_cfg;
3086 int alg, fid, rc;
3087
3088 /* Disable all flow key algx fieldx */
3089 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3090 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3091 rvu_write64(rvu, blkaddr,
3092 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3093 0);
3094 }
3095
3096 /* IPv4/IPv6 SIP/DIPs */
3097 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3098 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3099 if (rc < 0)
3100 return rc;
3101
3102 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3103 minkey_cfg = flowkey_cfg;
3104 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3105 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3106 if (rc < 0)
3107 return rc;
3108
3109 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3110 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3111 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3112 if
(rc < 0) 3113 return rc; 3114 3115 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3116 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3117 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3118 if (rc < 0) 3119 return rc; 3120 3121 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3122 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3123 NIX_FLOW_KEY_TYPE_UDP; 3124 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3125 if (rc < 0) 3126 return rc; 3127 3128 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3129 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3130 NIX_FLOW_KEY_TYPE_SCTP; 3131 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3132 if (rc < 0) 3133 return rc; 3134 3135 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3136 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3137 NIX_FLOW_KEY_TYPE_SCTP; 3138 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3139 if (rc < 0) 3140 return rc; 3141 3142 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3143 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3144 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3145 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3146 if (rc < 0) 3147 return rc; 3148 3149 return 0; 3150 } 3151 3152 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3153 struct nix_set_mac_addr *req, 3154 struct msg_rsp *rsp) 3155 { 3156 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3157 u16 pcifunc = req->hdr.pcifunc; 3158 int blkaddr, nixlf, err; 3159 struct rvu_pfvf *pfvf; 3160 3161 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3162 if (err) 3163 return err; 3164 3165 pfvf = rvu_get_pfvf(rvu, pcifunc); 3166 3167 /* untrusted VF can't overwrite admin(PF) changes */ 3168 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3169 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3170 dev_warn(rvu->dev, 3171 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3172 return -EPERM; 3173 } 3174 3175 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3176 3177 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3178 pfvf->rx_chan_base, req->mac_addr); 3179 3180 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 3181 ether_addr_copy(pfvf->default_mac, req->mac_addr); 3182 3183 return 0; 3184 } 3185 3186 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 3187 struct msg_req *req, 3188 struct nix_get_mac_addr_rsp *rsp) 3189 { 3190 u16 pcifunc = req->hdr.pcifunc; 3191 struct rvu_pfvf *pfvf; 3192 3193 if (!is_nixlf_attached(rvu, pcifunc)) 3194 return NIX_AF_ERR_AF_LF_INVALID; 3195 3196 pfvf = rvu_get_pfvf(rvu, pcifunc); 3197 3198 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 3199 3200 return 0; 3201 } 3202 3203 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 3204 struct msg_rsp *rsp) 3205 { 3206 bool allmulti, promisc, nix_rx_multicast; 3207 u16 pcifunc = req->hdr.pcifunc; 3208 struct rvu_pfvf *pfvf; 3209 int nixlf, err; 3210 3211 pfvf = rvu_get_pfvf(rvu, pcifunc); 3212 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 3213 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 3214 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
true : false; 3215 3216 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 3217 3218 if (is_vf(pcifunc) && !nix_rx_multicast && 3219 (promisc || allmulti)) { 3220 dev_warn_ratelimited(rvu->dev, 3221 "VF promisc/multicast not supported\n"); 3222 return 0; 3223 } 3224 3225 /* untrusted VF can't configure promisc/allmulti */ 3226 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3227 (promisc || allmulti)) 3228 return 0; 3229 3230 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3231 if (err) 3232 return err; 3233 3234 if (nix_rx_multicast) { 3235 /* add/del this PF_FUNC to/from mcast pkt replication list */ 3236 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 3237 allmulti); 3238 if (err) { 3239 dev_err(rvu->dev, 3240 "Failed to update pcifunc 0x%x to multicast list\n", 3241 pcifunc); 3242 return err; 3243 } 3244 3245 /* add/del this PF_FUNC to/from promisc pkt replication list */ 3246 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 3247 promisc); 3248 if (err) { 3249 dev_err(rvu->dev, 3250 "Failed to update pcifunc 0x%x to promisc list\n", 3251 pcifunc); 3252 return err; 3253 } 3254 } 3255 3256 /* install/uninstall allmulti entry */ 3257 if (allmulti) { 3258 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 3259 pfvf->rx_chan_base); 3260 } else { 3261 if (!nix_rx_multicast) 3262 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 3263 } 3264 3265 /* install/uninstall promisc entry */ 3266 if (promisc) { 3267 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3268 pfvf->rx_chan_base, 3269 pfvf->rx_chan_cnt); 3270 } else { 3271 if (!nix_rx_multicast) 3272 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3273 } 3274 3275 return 0; 3276 } 3277 3278 static void nix_find_link_frs(struct rvu *rvu, 3279 struct nix_frs_cfg *req, u16 pcifunc) 3280 { 3281 int pf = rvu_get_pf(pcifunc); 3282 struct rvu_pfvf *pfvf; 3283 int maxlen, minlen; 3284 int numvfs, hwvf; 3285 int vf; 3286 3287 /* Update with requester's min/max lengths */ 3288 pfvf = rvu_get_pfvf(rvu, pcifunc); 3289 pfvf->maxlen = req->maxlen; 3290 if (req->update_minlen) 3291 pfvf->minlen = req->minlen; 3292 3293 maxlen = req->maxlen; 3294 minlen = req->update_minlen ? 
req->minlen : 0; 3295 3296 /* Get this PF's numVFs and starting hwvf */ 3297 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 3298 3299 /* For each VF, compare requested max/minlen */ 3300 for (vf = 0; vf < numvfs; vf++) { 3301 pfvf = &rvu->hwvf[hwvf + vf]; 3302 if (pfvf->maxlen > maxlen) 3303 maxlen = pfvf->maxlen; 3304 if (req->update_minlen && 3305 pfvf->minlen && pfvf->minlen < minlen) 3306 minlen = pfvf->minlen; 3307 } 3308 3309 /* Compare requested max/minlen with PF's max/minlen */ 3310 pfvf = &rvu->pf[pf]; 3311 if (pfvf->maxlen > maxlen) 3312 maxlen = pfvf->maxlen; 3313 if (req->update_minlen && 3314 pfvf->minlen && pfvf->minlen < minlen) 3315 minlen = pfvf->minlen; 3316 3317 /* Update the request with max/min PF's and it's VF's max/min */ 3318 req->maxlen = maxlen; 3319 if (req->update_minlen) 3320 req->minlen = minlen; 3321 } 3322 3323 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 3324 struct msg_rsp *rsp) 3325 { 3326 struct rvu_hwinfo *hw = rvu->hw; 3327 u16 pcifunc = req->hdr.pcifunc; 3328 int pf = rvu_get_pf(pcifunc); 3329 int blkaddr, schq, link = -1; 3330 struct nix_txsch *txsch; 3331 u64 cfg, lmac_fifo_len; 3332 struct nix_hw *nix_hw; 3333 u8 cgx = 0, lmac = 0; 3334 u16 max_mtu; 3335 3336 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3337 if (blkaddr < 0) 3338 return NIX_AF_ERR_AF_LF_INVALID; 3339 3340 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3341 if (!nix_hw) 3342 return -EINVAL; 3343 3344 if (is_afvf(pcifunc)) 3345 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 3346 else 3347 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 3348 3349 if (!req->sdp_link && req->maxlen > max_mtu) 3350 return NIX_AF_ERR_FRS_INVALID; 3351 3352 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 3353 return NIX_AF_ERR_FRS_INVALID; 3354 3355 /* Check if requester wants to update SMQ's */ 3356 if (!req->update_smq) 3357 goto rx_frscfg; 3358 3359 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 3360 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 3361 mutex_lock(&rvu->rsrc_lock); 3362 for (schq = 0; schq < txsch->schq.max; schq++) { 3363 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 3364 continue; 3365 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 3366 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 3367 if (req->update_minlen) 3368 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 3369 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 3370 } 3371 mutex_unlock(&rvu->rsrc_lock); 3372 3373 rx_frscfg: 3374 /* Check if config is for SDP link */ 3375 if (req->sdp_link) { 3376 if (!hw->sdp_links) 3377 return NIX_AF_ERR_RX_LINK_INVALID; 3378 link = hw->cgx_links + hw->lbk_links; 3379 goto linkcfg; 3380 } 3381 3382 /* Check if the request is from CGX mapped RVU PF */ 3383 if (is_pf_cgxmapped(rvu, pf)) { 3384 /* Get CGX and LMAC to which this PF is mapped and find link */ 3385 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 3386 link = (cgx * hw->lmac_per_cgx) + lmac; 3387 } else if (pf == 0) { 3388 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 3389 link = hw->cgx_links; 3390 } 3391 3392 if (link < 0) 3393 return NIX_AF_ERR_RX_LINK_INVALID; 3394 3395 nix_find_link_frs(rvu, req, pcifunc); 3396 3397 linkcfg: 3398 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 3399 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 3400 if (req->update_minlen) 3401 cfg = (cfg & ~0xFFFFULL) | req->minlen; 3402 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 3403 3404 if (req->sdp_link || pf == 0) 
3405 return 0; 3406 3407 /* Update transmit credits for CGX links */ 3408 lmac_fifo_len = 3409 rvu_cgx_get_fifolen(rvu) / 3410 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3411 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3412 cfg &= ~(0xFFFFFULL << 12); 3413 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; 3414 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 3415 return 0; 3416 } 3417 3418 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3419 struct msg_rsp *rsp) 3420 { 3421 int nixlf, blkaddr, err; 3422 u64 cfg; 3423 3424 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3425 if (err) 3426 return err; 3427 3428 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3429 /* Set the interface configuration */ 3430 if (req->len_verify & BIT(0)) 3431 cfg |= BIT_ULL(41); 3432 else 3433 cfg &= ~BIT_ULL(41); 3434 3435 if (req->len_verify & BIT(1)) 3436 cfg |= BIT_ULL(40); 3437 else 3438 cfg &= ~BIT_ULL(40); 3439 3440 if (req->csum_verify & BIT(0)) 3441 cfg |= BIT_ULL(37); 3442 else 3443 cfg &= ~BIT_ULL(37); 3444 3445 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3446 3447 return 0; 3448 } 3449 3450 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 3451 { 3452 /* CN10k supports 72KB FIFO size and max packet size of 64k */ 3453 if (rvu->hw->lbk_bufsize == 0x12000) 3454 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; 3455 3456 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 3457 } 3458 3459 static void nix_link_config(struct rvu *rvu, int blkaddr) 3460 { 3461 struct rvu_hwinfo *hw = rvu->hw; 3462 int cgx, lmac_cnt, slink, link; 3463 u16 lbk_max_frs, lmac_max_frs; 3464 u64 tx_credits; 3465 3466 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 3467 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 3468 3469 /* Set default min/max packet lengths allowed on NIX Rx links. 3470 * 3471 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3472 * as undersize and report them to SW as error pkts, hence 3473 * setting it to 40 bytes. 3474 */ 3475 for (link = 0; link < hw->cgx_links; link++) { 3476 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3477 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 3478 } 3479 3480 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 3481 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3482 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 3483 } 3484 if (hw->sdp_links) { 3485 link = hw->cgx_links + hw->lbk_links; 3486 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3487 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3488 } 3489 3490 /* Set credits for Tx links assuming max packet length allowed. 3491 * This will be reconfigured based on MTU set for PF/VF. 
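 * Credits are in 16-byte units:
 * ((LMAC FIFO size / lmac_cnt) - max frame size) / 16, as computed below.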
3492 */ 3493 for (cgx = 0; cgx < hw->cgx; cgx++) { 3494 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3495 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - 3496 lmac_max_frs) / 16; 3497 /* Enable credits and set credit pkt count to max allowed */ 3498 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3499 slink = cgx * hw->lmac_per_cgx; 3500 for (link = slink; link < (slink + lmac_cnt); link++) { 3501 rvu_write64(rvu, blkaddr, 3502 NIX_AF_TX_LINKX_NORM_CREDIT(link), 3503 tx_credits); 3504 } 3505 } 3506 3507 /* Set Tx credits for LBK link */ 3508 slink = hw->cgx_links; 3509 for (link = slink; link < (slink + hw->lbk_links); link++) { 3510 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 3511 /* Enable credits and set credit pkt count to max allowed */ 3512 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3513 rvu_write64(rvu, blkaddr, 3514 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 3515 } 3516 } 3517 3518 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 3519 { 3520 int idx, err; 3521 u64 status; 3522 3523 /* Start X2P bus calibration */ 3524 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3525 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 3526 /* Wait for calibration to complete */ 3527 err = rvu_poll_reg(rvu, blkaddr, 3528 NIX_AF_STATUS, BIT_ULL(10), false); 3529 if (err) { 3530 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 3531 return err; 3532 } 3533 3534 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 3535 /* Check if CGX devices are ready */ 3536 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 3537 /* Skip when cgx port is not available */ 3538 if (!rvu_cgx_pdata(idx, rvu) || 3539 (status & (BIT_ULL(16 + idx)))) 3540 continue; 3541 dev_err(rvu->dev, 3542 "CGX%d didn't respond to NIX X2P calibration\n", idx); 3543 err = -EBUSY; 3544 } 3545 3546 /* Check if LBK is ready */ 3547 if (!(status & BIT_ULL(19))) { 3548 dev_err(rvu->dev, 3549 "LBK didn't respond to NIX X2P calibration\n"); 3550 err = -EBUSY; 3551 } 3552 3553 /* Clear 'calibrate_x2p' bit */ 3554 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3555 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 3556 if (err || (status & 0x3FFULL)) 3557 dev_err(rvu->dev, 3558 "NIX X2P calibration failed, status 0x%llx\n", status); 3559 if (err) 3560 return err; 3561 return 0; 3562 } 3563 3564 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 3565 { 3566 u64 cfg; 3567 int err; 3568 3569 /* Set admin queue endianness */ 3570 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 3571 #ifdef __BIG_ENDIAN 3572 cfg |= BIT_ULL(8); 3573 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3574 #else 3575 cfg &= ~BIT_ULL(8); 3576 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3577 #endif 3578 3579 /* Do not bypass NDC cache */ 3580 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 3581 cfg &= ~0x3FFEULL; 3582 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 3583 /* Disable caching of SQB aka SQEs */ 3584 cfg |= 0x04ULL; 3585 #endif 3586 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 3587 3588 /* Result structure can be followed by RQ/SQ/CQ context at 3589 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 3590 * operation type. Alloc sufficient result memory for all operations. 
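 * (hence each result entry allocated below is
 * ALIGN(sizeof(struct nix_aq_res_s), 128) + 256 bytes).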
3591 */ 3592 err = rvu_aq_alloc(rvu, &block->aq, 3593 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 3594 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 3595 if (err) 3596 return err; 3597 3598 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 3599 rvu_write64(rvu, block->addr, 3600 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 3601 return 0; 3602 } 3603 3604 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 3605 { 3606 const struct npc_lt_def_cfg *ltdefs; 3607 struct rvu_hwinfo *hw = rvu->hw; 3608 int blkaddr = nix_hw->blkaddr; 3609 struct rvu_block *block; 3610 int err; 3611 u64 cfg; 3612 3613 block = &hw->block[blkaddr]; 3614 3615 if (is_rvu_96xx_B0(rvu)) { 3616 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 3617 * internal state when conditional clocks are turned off. 3618 * Hence enable them. 3619 */ 3620 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3621 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 3622 3623 /* Set chan/link to backpressure TL3 instead of TL2 */ 3624 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 3625 3626 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 3627 * This sticky mode is known to cause SQ stalls when multiple 3628 * SQs are mapped to same SMQ and transmitting pkts at a time. 3629 */ 3630 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 3631 cfg &= ~BIT_ULL(15); 3632 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 3633 } 3634 3635 ltdefs = rvu->kpu.lt_def; 3636 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 3637 err = nix_calibrate_x2p(rvu, blkaddr); 3638 if (err) 3639 return err; 3640 3641 /* Initialize admin queue */ 3642 err = nix_aq_init(rvu, block); 3643 if (err) 3644 return err; 3645 3646 /* Restore CINT timer delay to HW reset values */ 3647 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 3648 3649 if (is_block_implemented(hw, blkaddr)) { 3650 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 3651 if (err) 3652 return err; 3653 3654 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 3655 if (err) 3656 return err; 3657 3658 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 3659 if (err) 3660 return err; 3661 3662 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 3663 if (err) 3664 return err; 3665 3666 err = nix_setup_txvlan(rvu, nix_hw); 3667 if (err) 3668 return err; 3669 3670 /* Configure segmentation offload formats */ 3671 nix_setup_lso(rvu, nix_hw, blkaddr); 3672 3673 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 3674 * This helps HW protocol checker to identify headers 3675 * and validate length and checksums. 
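 * Each NIX_AF_RX_DEF_* register below is programmed as
 * (lid << 8) | (ltype_match << 4) | ltype_mask from the KPU lt_def profile.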
3676 */
3677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3678 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3679 ltdefs->rx_ol2.ltype_mask);
3680 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3681 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3682 ltdefs->rx_oip4.ltype_mask);
3683 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3684 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3685 ltdefs->rx_iip4.ltype_mask);
3686 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3687 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3688 ltdefs->rx_oip6.ltype_mask);
3689 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3690 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3691 ltdefs->rx_iip6.ltype_mask);
3692 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3693 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3694 ltdefs->rx_otcp.ltype_mask);
3695 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3696 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3697 ltdefs->rx_itcp.ltype_mask);
3698 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3699 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3700 ltdefs->rx_oudp.ltype_mask);
3701 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3702 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3703 ltdefs->rx_iudp.ltype_mask);
3704 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3705 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3706 ltdefs->rx_osctp.ltype_mask);
3707 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3708 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3709 ltdefs->rx_isctp.ltype_mask);
3710
3711 if (!is_rvu_otx2(rvu)) {
3712 /* Enable APAD calculation for other protocols
3713 * matching APAD0 and APAD1 lt def registers.
3714 */
3715 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3716 (ltdefs->rx_apad0.valid << 11) |
3717 (ltdefs->rx_apad0.lid << 8) |
3718 (ltdefs->rx_apad0.ltype_match << 4) |
3719 ltdefs->rx_apad0.ltype_mask);
3720 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3721 (ltdefs->rx_apad1.valid << 11) |
3722 (ltdefs->rx_apad1.lid << 8) |
3723 (ltdefs->rx_apad1.ltype_match << 4) |
3724 ltdefs->rx_apad1.ltype_mask);
3725
3726 /* Receive ethertype definition register defines layer
3727 * information in NPC_RESULT_S to identify the Ethertype
3728 * location in L2 header. Used for Ethertype overwriting
3729 * in inline IPsec flow.
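 * Two instances, NIX_AF_RX_DEF_ET(0) and NIX_AF_RX_DEF_ET(1), are
 * programmed below from ltdefs->rx_et[0] and ltdefs->rx_et[1].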
3730 */ 3731 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 3732 (ltdefs->rx_et[0].offset << 12) | 3733 (ltdefs->rx_et[0].valid << 11) | 3734 (ltdefs->rx_et[0].lid << 8) | 3735 (ltdefs->rx_et[0].ltype_match << 4) | 3736 ltdefs->rx_et[0].ltype_mask); 3737 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 3738 (ltdefs->rx_et[1].offset << 12) | 3739 (ltdefs->rx_et[1].valid << 11) | 3740 (ltdefs->rx_et[1].lid << 8) | 3741 (ltdefs->rx_et[1].ltype_match << 4) | 3742 ltdefs->rx_et[1].ltype_mask); 3743 } 3744 3745 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 3746 if (err) 3747 return err; 3748 3749 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 3750 nix_link_config(rvu, blkaddr); 3751 3752 /* Enable Channel backpressure */ 3753 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 3754 } 3755 return 0; 3756 } 3757 3758 int rvu_nix_init(struct rvu *rvu) 3759 { 3760 struct rvu_hwinfo *hw = rvu->hw; 3761 struct nix_hw *nix_hw; 3762 int blkaddr = 0, err; 3763 int i = 0; 3764 3765 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 3766 GFP_KERNEL); 3767 if (!hw->nix) 3768 return -ENOMEM; 3769 3770 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3771 while (blkaddr) { 3772 nix_hw = &hw->nix[i]; 3773 nix_hw->rvu = rvu; 3774 nix_hw->blkaddr = blkaddr; 3775 err = rvu_nix_block_init(rvu, nix_hw); 3776 if (err) 3777 return err; 3778 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3779 i++; 3780 } 3781 3782 return 0; 3783 } 3784 3785 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 3786 struct rvu_block *block) 3787 { 3788 struct nix_txsch *txsch; 3789 struct nix_mcast *mcast; 3790 struct nix_txvlan *vlan; 3791 struct nix_hw *nix_hw; 3792 int lvl; 3793 3794 rvu_aq_free(rvu, block->aq); 3795 3796 if (is_block_implemented(rvu->hw, blkaddr)) { 3797 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3798 if (!nix_hw) 3799 return; 3800 3801 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3802 txsch = &nix_hw->txsch[lvl]; 3803 kfree(txsch->schq.bmap); 3804 } 3805 3806 nix_ipolicer_freemem(nix_hw); 3807 3808 vlan = &nix_hw->txvlan; 3809 kfree(vlan->rsrc.bmap); 3810 mutex_destroy(&vlan->rsrc_lock); 3811 devm_kfree(rvu->dev, vlan->entry2pfvf_map); 3812 3813 mcast = &nix_hw->mcast; 3814 qmem_free(rvu->dev, mcast->mce_ctx); 3815 qmem_free(rvu->dev, mcast->mcast_buf); 3816 mutex_destroy(&mcast->mce_lock); 3817 } 3818 } 3819 3820 void rvu_nix_freemem(struct rvu *rvu) 3821 { 3822 struct rvu_hwinfo *hw = rvu->hw; 3823 struct rvu_block *block; 3824 int blkaddr = 0; 3825 3826 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3827 while (blkaddr) { 3828 block = &hw->block[blkaddr]; 3829 rvu_nix_block_freemem(rvu, blkaddr, block); 3830 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3831 } 3832 } 3833 3834 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 3835 struct msg_rsp *rsp) 3836 { 3837 u16 pcifunc = req->hdr.pcifunc; 3838 struct rvu_pfvf *pfvf; 3839 int nixlf, err; 3840 3841 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3842 if (err) 3843 return err; 3844 3845 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 3846 3847 npc_mcam_enable_flows(rvu, pcifunc); 3848 3849 pfvf = rvu_get_pfvf(rvu, pcifunc); 3850 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 3851 3852 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 3853 } 3854 3855 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 3856 struct msg_rsp *rsp) 3857 { 3858 u16 pcifunc = req->hdr.pcifunc; 3859 struct rvu_pfvf *pfvf; 3860 int nixlf, err; 3861 3862 err = nix_get_nixlf(rvu, 
int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		nix_ipolicer_freemem(nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);
		devm_kfree(rvu->dev, vlan->entry2pfvf_map);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
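
/* Free everything a NIX LF holds when it is being torn down: NPC MCAM
 * entries, Tx scheduler queues, the SQ/RQ/CQ hardware contexts and,
 * finally, any bandwidth profiles owned by the PCIFUNC.
 */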
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);
}

#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf;
	int nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}

void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* overwrite vf mac address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}

/* NIX ingress policers or bandwidth profiles APIs */
static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
{
	struct npc_lt_def_cfg defs, *ltdefs;

	ltdefs = &defs;
	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));

	/* Extract PCP and DEI fields from the outer VLAN at byte offset 2
	 * from the start of LB_PTR (i.e. the TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
}

static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
				    int layer, int prof_idx)
{
	struct nix_cn10k_aq_enq_req aq_req;
	int rc;

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));

	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_INIT;

	/* Context is all zeros, submit to AQ */
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);
	return rc;
}
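
/* Probe and set up the NIX ingress policer (bandwidth profile) layers.
 * The hardware exposes leaf, mid and top layers of profiles; this reads
 * the per-layer profile counts from NIX_AF_PL_CONST, allocates the
 * tracking bitmaps and maps, INITs every profile context and programs
 * the policer time unit and pre-color tables.
 */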
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0xFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
			break;
		}

		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* The profile context has no enable bit, hence there
			 * is no explicit context disable. INIT all profiles
			 * here so that PF/VF only need to issue a WRITE later
			 * on to set up policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles, this will be needed for leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}

static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
{
	struct nix_ipolicer *ipolicer;
	int layer;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];

		if (!ipolicer->band_prof.max)
			continue;

		kfree(ipolicer->band_prof.bmap);
	}
}
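
/* Bandwidth profile AQ operations encode the target profile in qidx as
 * (layer << 14) | (prof_idx & 0x3FFF); nix_verify_bandprof() below and
 * nix_init_policer_context() above both rely on this encoding.
 */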
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check whether the profile is allocated to the requesting PCIFUNC,
	 * with the exception of AF. AF is allowed to read and update any
	 * context.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to a higher layer profile then check
	 * whether that profile is also allocated to the requesting PCIFUNC.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* A leaf layer profile can link only to a mid layer profile, and
	 * a mid layer profile only to a top layer profile.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;

			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
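
/* Return every bandwidth profile owned by 'pcifunc' to the free pool.
 * Used by the mailbox free handler when 'free_all' is set and by
 * rvu_nix_lf_teardown() when the NIX LF is released.
 */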
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}

static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
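
/* If several RQs are policed with the same 'match_id', link their leaf
 * bandwidth profiles to a common mid layer profile so that the flows
 * are rate limited in aggregate rather than per RQ.
 */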
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map 'leaf_prof' to it
		 * as well, so that flows steered to different RQs but marked
		 * with the same match_id are rate limited in an aggregate
		 * fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with rsrc_lock held; the lock is dropped across the AQ context
 * read below and re-acquired before the mid layer ref_count is updated.
 */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}