1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell OcteonTx2 RVU Admin Function driver 3 * 4 * Copyright (C) 2018 Marvell International Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 14 #include "rvu_struct.h" 15 #include "rvu_reg.h" 16 #include "rvu.h" 17 #include "npc.h" 18 #include "cgx.h" 19 #include "lmac_common.h" 20 21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 23 int type, int chan_id); 24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 25 int type, bool add); 26 static int nix_setup_ipolicers(struct rvu *rvu, 27 struct nix_hw *nix_hw, int blkaddr); 28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw); 29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 30 struct nix_hw *nix_hw, u16 pcifunc); 31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); 32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 33 u32 leaf_prof); 34 35 enum mc_tbl_sz { 36 MC_TBL_SZ_256, 37 MC_TBL_SZ_512, 38 MC_TBL_SZ_1K, 39 MC_TBL_SZ_2K, 40 MC_TBL_SZ_4K, 41 MC_TBL_SZ_8K, 42 MC_TBL_SZ_16K, 43 MC_TBL_SZ_32K, 44 MC_TBL_SZ_64K, 45 }; 46 47 enum mc_buf_cnt { 48 MC_BUF_CNT_8, 49 MC_BUF_CNT_16, 50 MC_BUF_CNT_32, 51 MC_BUF_CNT_64, 52 MC_BUF_CNT_128, 53 MC_BUF_CNT_256, 54 MC_BUF_CNT_512, 55 MC_BUF_CNT_1024, 56 MC_BUF_CNT_2048, 57 }; 58 59 enum nix_makr_fmt_indexes { 60 NIX_MARK_CFG_IP_DSCP_RED, 61 NIX_MARK_CFG_IP_DSCP_YELLOW, 62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED, 63 NIX_MARK_CFG_IP_ECN_RED, 64 NIX_MARK_CFG_IP_ECN_YELLOW, 65 NIX_MARK_CFG_IP_ECN_YELLOW_RED, 66 NIX_MARK_CFG_VLAN_DEI_RED, 67 NIX_MARK_CFG_VLAN_DEI_YELLOW, 68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, 69 NIX_MARK_CFG_MAX, 70 }; 71 72 /* For now considering MC resources needed for broadcast 73 * pkt replication only. i.e 256 HWVFs + 12 PFs. 74 */ 75 #define MC_TBL_SIZE MC_TBL_SZ_512 76 #define MC_BUF_CNT MC_BUF_CNT_128 77 78 struct mce { 79 struct hlist_node node; 80 u16 pcifunc; 81 }; 82 83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) 84 { 85 int i = 0; 86 87 /*If blkaddr is 0, return the first nix block address*/ 88 if (blkaddr == 0) 89 return rvu->nix_blkaddr[blkaddr]; 90 91 while (i + 1 < MAX_NIX_BLKS) { 92 if (rvu->nix_blkaddr[i] == blkaddr) 93 return rvu->nix_blkaddr[i + 1]; 94 i++; 95 } 96 97 return 0; 98 } 99 100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) 101 { 102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 103 int blkaddr; 104 105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 106 if (!pfvf->nixlf || blkaddr < 0) 107 return false; 108 return true; 109 } 110 111 int rvu_get_nixlf_count(struct rvu *rvu) 112 { 113 int blkaddr = 0, max = 0; 114 struct rvu_block *block; 115 116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 117 while (blkaddr) { 118 block = &rvu->hw->block[blkaddr]; 119 max += block->lf.max; 120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 121 } 122 return max; 123 } 124 125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) 126 { 127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 128 struct rvu_hwinfo *hw = rvu->hw; 129 int blkaddr; 130 131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 132 if (!pfvf->nixlf || blkaddr < 0) 133 return NIX_AF_ERR_AF_LF_INVALID; 134 135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 136 if (*nixlf < 0) 137 return NIX_AF_ERR_AF_LF_INVALID; 138 139 if (nix_blkaddr) 140 *nix_blkaddr = blkaddr; 141 142 return 0; 143 } 144 145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, 146 struct nix_hw **nix_hw, int *blkaddr) 147 { 148 struct rvu_pfvf *pfvf; 149 150 pfvf = rvu_get_pfvf(rvu, pcifunc); 151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 152 if (!pfvf->nixlf || *blkaddr < 0) 153 return NIX_AF_ERR_AF_LF_INVALID; 154 155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr); 156 if (!*nix_hw) 157 return NIX_AF_ERR_INVALID_NIXBLK; 158 return 0; 159 } 160 161 static void nix_mce_list_init(struct nix_mce_list *list, int max) 162 { 163 INIT_HLIST_HEAD(&list->head); 164 list->count = 0; 165 list->max = max; 166 } 167 168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) 169 { 170 int idx; 171 172 if (!mcast) 173 return 0; 174 175 idx = mcast->next_free_mce; 176 mcast->next_free_mce += count; 177 return idx; 178 } 179 180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) 181 { 182 int nix_blkaddr = 0, i = 0; 183 struct rvu *rvu = hw->rvu; 184 185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 186 while (nix_blkaddr) { 187 if (blkaddr == nix_blkaddr && hw->nix) 188 return &hw->nix[i]; 189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); 190 i++; 191 } 192 return NULL; 193 } 194 195 static void nix_rx_sync(struct rvu *rvu, int blkaddr) 196 { 197 int err; 198 199 /*Sync all in flight RX packets to LLC/DRAM */ 200 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); 201 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); 202 if (err) 203 dev_err(rvu->dev, "NIX RX software sync failed\n"); 204 } 205 206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr, 207 int lvl, u16 pcifunc, u16 schq) 208 { 209 struct rvu_hwinfo *hw = rvu->hw; 210 struct nix_txsch *txsch; 211 struct nix_hw *nix_hw; 212 u16 map_func; 213 214 nix_hw = get_nix_hw(rvu->hw, blkaddr); 215 if (!nix_hw) 216 return false; 217 218 txsch = &nix_hw->txsch[lvl]; 219 /* Check out of bounds */ 220 if (schq >= txsch->schq.max) 221 return false; 222 223 mutex_lock(&rvu->rsrc_lock); 224 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); 225 mutex_unlock(&rvu->rsrc_lock); 226 227 /* TLs aggegating traffic are shared across PF and VFs */ 228 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 229 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) 230 return false; 231 else 232 return true; 233 } 234 235 if (map_func != pcifunc) 236 return false; 237 238 return true; 239 } 240 241 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) 242 { 243 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 244 struct mac_ops *mac_ops; 245 int pkind, pf, vf, lbkid; 246 u8 cgx_id, lmac_id; 247 int err; 248 249 pf = rvu_get_pf(pcifunc); 250 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 251 return 0; 252 253 switch (type) { 254 case NIX_INTF_TYPE_CGX: 255 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; 256 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 257 258 pkind = rvu_npc_get_pkind(rvu, pf); 259 if (pkind < 0) { 260 dev_err(rvu->dev, 261 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 262 return -EINVAL; 263 } 264 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 265 pfvf->tx_chan_base = pfvf->rx_chan_base; 266 pfvf->rx_chan_cnt = 1; 267 pfvf->tx_chan_cnt = 1; 268 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 269 rvu_npc_set_pkind(rvu, pkind, pfvf); 270 271 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); 272 /* By default we enable pause frames */ 273 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) 274 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, 275 rvu), 276 lmac_id, true, true); 277 break; 278 case NIX_INTF_TYPE_LBK: 279 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; 280 281 /* If NIX1 block is present on the silicon then NIXes are 282 * assigned alternatively for lbk interfaces. NIX0 should 283 * send packets on lbk link 1 channels and NIX1 should send 284 * on lbk link 0 channels for the communication between 285 * NIX0 and NIX1. 286 */ 287 lbkid = 0; 288 if (rvu->hw->lbk_links > 1) 289 lbkid = vf & 0x1 ? 0 : 1; 290 291 /* Note that AF's VFs work in pairs and talk over consecutive 292 * loopback channels.Therefore if odd number of AF VFs are 293 * enabled then the last VF remains with no pair. 294 */ 295 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 296 pfvf->tx_chan_base = vf & 0x1 ? 297 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 298 rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 299 pfvf->rx_chan_cnt = 1; 300 pfvf->tx_chan_cnt = 1; 301 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 302 pfvf->rx_chan_base, 303 pfvf->rx_chan_cnt); 304 break; 305 } 306 307 /* Add a UCAST forwarding rule in MCAM with this NIXLF attached 308 * RVU PF/VF's MAC address. 309 */ 310 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 311 pfvf->rx_chan_base, pfvf->mac_addr); 312 313 /* Add this PF_FUNC to bcast pkt replication list */ 314 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); 315 if (err) { 316 dev_err(rvu->dev, 317 "Bcast list, failed to enable PF_FUNC 0x%x\n", 318 pcifunc); 319 return err; 320 } 321 /* Install MCAM rule matching Ethernet broadcast mac address */ 322 rvu_npc_install_bcast_match_entry(rvu, pcifunc, 323 nixlf, pfvf->rx_chan_base); 324 325 pfvf->maxlen = NIC_HW_MIN_FRS; 326 pfvf->minlen = NIC_HW_MIN_FRS; 327 328 return 0; 329 } 330 331 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) 332 { 333 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 334 int err; 335 336 pfvf->maxlen = 0; 337 pfvf->minlen = 0; 338 339 /* Remove this PF_FUNC from bcast pkt replication list */ 340 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); 341 if (err) { 342 dev_err(rvu->dev, 343 "Bcast list, failed to disable PF_FUNC 0x%x\n", 344 pcifunc); 345 } 346 347 /* Free and disable any MCAM entries used by this NIX LF */ 348 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 349 350 /* Disable DMAC filters used */ 351 rvu_cgx_disable_dmac_entries(rvu, pcifunc); 352 } 353 354 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, 355 struct nix_bp_cfg_req *req, 356 struct msg_rsp *rsp) 357 { 358 u16 pcifunc = req->hdr.pcifunc; 359 struct rvu_pfvf *pfvf; 360 int blkaddr, pf, type; 361 u16 chan_base, chan; 362 u64 cfg; 363 364 pf = rvu_get_pf(pcifunc); 365 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 366 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 367 return 0; 368 369 pfvf = rvu_get_pfvf(rvu, pcifunc); 370 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 371 372 chan_base = pfvf->rx_chan_base + req->chan_base; 373 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 374 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 375 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 376 cfg & ~BIT_ULL(16)); 377 } 378 return 0; 379 } 380 381 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, 382 int type, int chan_id) 383 { 384 int bpid, blkaddr, lmac_chan_cnt; 385 struct rvu_hwinfo *hw = rvu->hw; 386 u16 cgx_bpid_cnt, lbk_bpid_cnt; 387 struct rvu_pfvf *pfvf; 388 u8 cgx_id, lmac_id; 389 u64 cfg; 390 391 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 392 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 393 lmac_chan_cnt = cfg & 0xFF; 394 395 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; 396 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); 397 398 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 399 400 /* Backpressure IDs range division 401 * CGX channles are mapped to (0 - 191) BPIDs 402 * LBK channles are mapped to (192 - 255) BPIDs 403 * SDP channles are mapped to (256 - 511) BPIDs 404 * 405 * Lmac channles and bpids mapped as follows 406 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) 407 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... 408 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 409 */ 410 switch (type) { 411 case NIX_INTF_TYPE_CGX: 412 if ((req->chan_base + req->chan_cnt) > 15) 413 return -EINVAL; 414 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); 415 /* Assign bpid based on cgx, lmac and chan id */ 416 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + 417 (lmac_id * lmac_chan_cnt) + req->chan_base; 418 419 if (req->bpid_per_chan) 420 bpid += chan_id; 421 if (bpid > cgx_bpid_cnt) 422 return -EINVAL; 423 break; 424 425 case NIX_INTF_TYPE_LBK: 426 if ((req->chan_base + req->chan_cnt) > 63) 427 return -EINVAL; 428 bpid = cgx_bpid_cnt + req->chan_base; 429 if (req->bpid_per_chan) 430 bpid += chan_id; 431 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) 432 return -EINVAL; 433 break; 434 default: 435 return -EINVAL; 436 } 437 return bpid; 438 } 439 440 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, 441 struct nix_bp_cfg_req *req, 442 struct nix_bp_cfg_rsp *rsp) 443 { 444 int blkaddr, pf, type, chan_id = 0; 445 u16 pcifunc = req->hdr.pcifunc; 446 struct rvu_pfvf *pfvf; 447 u16 chan_base, chan; 448 s16 bpid, bpid_base; 449 u64 cfg; 450 451 pf = rvu_get_pf(pcifunc); 452 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 453 454 /* Enable backpressure only for CGX mapped PFs and LBK interface */ 455 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) 456 return 0; 457 458 pfvf = rvu_get_pfvf(rvu, pcifunc); 459 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 460 461 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); 462 chan_base = pfvf->rx_chan_base + req->chan_base; 463 bpid = bpid_base; 464 465 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { 466 if (bpid < 0) { 467 dev_warn(rvu->dev, "Fail to enable backpressure\n"); 468 return -EINVAL; 469 } 470 471 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); 472 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), 473 cfg | (bpid & 0xFF) | BIT_ULL(16)); 474 chan_id++; 475 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); 476 } 477 478 for (chan = 0; chan < req->chan_cnt; chan++) { 479 /* Map channel and bpid assign to it */ 480 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | 481 (bpid_base & 0x3FF); 482 if (req->bpid_per_chan) 483 bpid_base++; 484 } 485 rsp->chan_cnt = req->chan_cnt; 486 487 return 0; 488 } 489 490 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, 491 u64 format, bool v4, u64 *fidx) 492 { 493 struct nix_lso_format field = {0}; 494 495 /* IP's Length field */ 496 field.layer = NIX_TXLAYER_OL3; 497 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ 498 field.offset = v4 ? 2 : 4; 499 field.sizem1 = 1; /* i.e 2 bytes */ 500 field.alg = NIX_LSOALG_ADD_PAYLEN; 501 rvu_write64(rvu, blkaddr, 502 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 503 *(u64 *)&field); 504 505 /* No ID field in IPv6 header */ 506 if (!v4) 507 return; 508 509 /* IP's ID field */ 510 field.layer = NIX_TXLAYER_OL3; 511 field.offset = 4; 512 field.sizem1 = 1; /* i.e 2 bytes */ 513 field.alg = NIX_LSOALG_ADD_SEGNUM; 514 rvu_write64(rvu, blkaddr, 515 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 516 *(u64 *)&field); 517 } 518 519 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, 520 u64 format, u64 *fidx) 521 { 522 struct nix_lso_format field = {0}; 523 524 /* TCP's sequence number field */ 525 field.layer = NIX_TXLAYER_OL4; 526 field.offset = 4; 527 field.sizem1 = 3; /* i.e 4 bytes */ 528 field.alg = NIX_LSOALG_ADD_OFFSET; 529 rvu_write64(rvu, blkaddr, 530 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 531 *(u64 *)&field); 532 533 /* TCP's flags field */ 534 field.layer = NIX_TXLAYER_OL4; 535 field.offset = 12; 536 field.sizem1 = 1; /* 2 bytes */ 537 field.alg = NIX_LSOALG_TCP_FLAGS; 538 rvu_write64(rvu, blkaddr, 539 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), 540 *(u64 *)&field); 541 } 542 543 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 544 { 545 u64 cfg, idx, fidx = 0; 546 547 /* Get max HW supported format indices */ 548 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; 549 nix_hw->lso.total = cfg; 550 551 /* Enable LSO */ 552 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); 553 /* For TSO, set first and middle segment flags to 554 * mask out PSH, RST & FIN flags in TCP packet 555 */ 556 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); 557 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); 558 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); 559 560 /* Setup default static LSO formats 561 * 562 * Configure format fields for TCPv4 segmentation offload 563 */ 564 idx = NIX_LSO_FORMAT_IDX_TSOV4; 565 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); 566 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 567 568 /* Set rest of the fields to NOP */ 569 for (; fidx < 8; fidx++) { 570 rvu_write64(rvu, blkaddr, 571 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 572 } 573 nix_hw->lso.in_use++; 574 575 /* Configure format fields for TCPv6 segmentation offload */ 576 idx = NIX_LSO_FORMAT_IDX_TSOV6; 577 fidx = 0; 578 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); 579 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); 580 581 /* Set rest of the fields to NOP */ 582 for (; fidx < 8; fidx++) { 583 rvu_write64(rvu, blkaddr, 584 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); 585 } 586 nix_hw->lso.in_use++; 587 } 588 589 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) 590 { 591 kfree(pfvf->rq_bmap); 592 kfree(pfvf->sq_bmap); 593 kfree(pfvf->cq_bmap); 594 if (pfvf->rq_ctx) 595 qmem_free(rvu->dev, pfvf->rq_ctx); 596 if (pfvf->sq_ctx) 597 qmem_free(rvu->dev, pfvf->sq_ctx); 598 if (pfvf->cq_ctx) 599 qmem_free(rvu->dev, pfvf->cq_ctx); 600 if (pfvf->rss_ctx) 601 qmem_free(rvu->dev, pfvf->rss_ctx); 602 if (pfvf->nix_qints_ctx) 603 qmem_free(rvu->dev, pfvf->nix_qints_ctx); 604 if (pfvf->cq_ints_ctx) 605 qmem_free(rvu->dev, pfvf->cq_ints_ctx); 606 607 pfvf->rq_bmap = NULL; 608 pfvf->cq_bmap = NULL; 609 pfvf->sq_bmap = NULL; 610 pfvf->rq_ctx = NULL; 611 pfvf->sq_ctx = NULL; 612 pfvf->cq_ctx = NULL; 613 pfvf->rss_ctx = NULL; 614 pfvf->nix_qints_ctx = NULL; 615 pfvf->cq_ints_ctx = NULL; 616 } 617 618 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, 619 struct rvu_pfvf *pfvf, int nixlf, 620 int rss_sz, int rss_grps, int hwctx_size, 621 u64 way_mask) 622 { 623 int err, grp, num_indices; 624 625 /* RSS is not requested for this NIXLF */ 626 if (!rss_sz) 627 return 0; 628 num_indices = rss_sz * rss_grps; 629 630 /* Alloc NIX RSS HW context memory and config the base */ 631 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); 632 if (err) 633 return err; 634 635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), 636 (u64)pfvf->rss_ctx->iova); 637 638 /* Config full RSS table size, enable RSS and caching */ 639 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), 640 BIT_ULL(36) | BIT_ULL(4) | 641 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | 642 way_mask << 20); 643 /* Config RSS group offset and sizes */ 644 for (grp = 0; grp < rss_grps; grp++) 645 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), 646 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); 647 return 0; 648 } 649 650 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, 651 struct nix_aq_inst_s *inst) 652 { 653 struct admin_queue *aq = block->aq; 654 struct nix_aq_res_s *result; 655 int timeout = 1000; 656 u64 reg, head; 657 658 result = (struct nix_aq_res_s *)aq->res->base; 659 660 /* Get current head pointer where to append this instruction */ 661 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); 662 head = (reg >> 4) & AQ_PTR_MASK; 663 664 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), 665 (void *)inst, aq->inst->entry_sz); 666 memset(result, 0, sizeof(*result)); 667 /* sync into memory */ 668 wmb(); 669 670 /* Ring the doorbell and wait for result */ 671 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); 672 while (result->compcode == NIX_AQ_COMP_NOTDONE) { 673 cpu_relax(); 674 udelay(1); 675 timeout--; 676 if (!timeout) 677 return -EBUSY; 678 } 679 680 if (result->compcode != NIX_AQ_COMP_GOOD) 681 /* TODO: Replace this with some error code */ 682 return -EBUSY; 683 684 return 0; 685 } 686 687 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, 688 struct nix_aq_enq_req *req, 689 struct nix_aq_enq_rsp *rsp) 690 { 691 struct rvu_hwinfo *hw = rvu->hw; 692 u16 pcifunc = req->hdr.pcifunc; 693 int nixlf, blkaddr, rc = 0; 694 struct nix_aq_inst_s inst; 695 struct rvu_block *block; 696 struct admin_queue *aq; 697 struct rvu_pfvf *pfvf; 698 void *ctx, *mask; 699 bool ena; 700 u64 cfg; 701 702 blkaddr = nix_hw->blkaddr; 703 block = &hw->block[blkaddr]; 704 aq = block->aq; 705 if (!aq) { 706 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); 707 return NIX_AF_ERR_AQ_ENQUEUE; 708 } 709 710 pfvf = rvu_get_pfvf(rvu, pcifunc); 711 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 712 713 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile 714 * operations done by AF itself. 715 */ 716 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || 717 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { 718 if (!pfvf->nixlf || nixlf < 0) 719 return NIX_AF_ERR_AF_LF_INVALID; 720 } 721 722 switch (req->ctype) { 723 case NIX_AQ_CTYPE_RQ: 724 /* Check if index exceeds max no of queues */ 725 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) 726 rc = NIX_AF_ERR_AQ_ENQUEUE; 727 break; 728 case NIX_AQ_CTYPE_SQ: 729 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) 730 rc = NIX_AF_ERR_AQ_ENQUEUE; 731 break; 732 case NIX_AQ_CTYPE_CQ: 733 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) 734 rc = NIX_AF_ERR_AQ_ENQUEUE; 735 break; 736 case NIX_AQ_CTYPE_RSS: 737 /* Check if RSS is enabled and qidx is within range */ 738 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); 739 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || 740 (req->qidx >= (256UL << (cfg & 0xF)))) 741 rc = NIX_AF_ERR_AQ_ENQUEUE; 742 break; 743 case NIX_AQ_CTYPE_MCE: 744 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); 745 746 /* Check if index exceeds MCE list length */ 747 if (!nix_hw->mcast.mce_ctx || 748 (req->qidx >= (256UL << (cfg & 0xF)))) 749 rc = NIX_AF_ERR_AQ_ENQUEUE; 750 751 /* Adding multicast lists for requests from PF/VFs is not 752 * yet supported, so ignore this. 753 */ 754 if (rsp) 755 rc = NIX_AF_ERR_AQ_ENQUEUE; 756 break; 757 case NIX_AQ_CTYPE_BANDPROF: 758 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, 759 nix_hw, pcifunc)) 760 rc = NIX_AF_ERR_INVALID_BANDPROF; 761 break; 762 default: 763 rc = NIX_AF_ERR_AQ_ENQUEUE; 764 } 765 766 if (rc) 767 return rc; 768 769 /* Check if SQ pointed SMQ belongs to this PF/VF or not */ 770 if (req->ctype == NIX_AQ_CTYPE_SQ && 771 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || 772 (req->op == NIX_AQ_INSTOP_WRITE && 773 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { 774 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, 775 pcifunc, req->sq.smq)) 776 return NIX_AF_ERR_AQ_ENQUEUE; 777 } 778 779 memset(&inst, 0, sizeof(struct nix_aq_inst_s)); 780 inst.lf = nixlf; 781 inst.cindex = req->qidx; 782 inst.ctype = req->ctype; 783 inst.op = req->op; 784 /* Currently we are not supporting enqueuing multiple instructions, 785 * so always choose first entry in result memory. 786 */ 787 inst.res_addr = (u64)aq->res->iova; 788 789 /* Hardware uses same aq->res->base for updating result of 790 * previous instruction hence wait here till it is done. 791 */ 792 spin_lock(&aq->lock); 793 794 /* Clean result + context memory */ 795 memset(aq->res->base, 0, aq->res->entry_sz); 796 /* Context needs to be written at RES_ADDR + 128 */ 797 ctx = aq->res->base + 128; 798 /* Mask needs to be written at RES_ADDR + 256 */ 799 mask = aq->res->base + 256; 800 801 switch (req->op) { 802 case NIX_AQ_INSTOP_WRITE: 803 if (req->ctype == NIX_AQ_CTYPE_RQ) 804 memcpy(mask, &req->rq_mask, 805 sizeof(struct nix_rq_ctx_s)); 806 else if (req->ctype == NIX_AQ_CTYPE_SQ) 807 memcpy(mask, &req->sq_mask, 808 sizeof(struct nix_sq_ctx_s)); 809 else if (req->ctype == NIX_AQ_CTYPE_CQ) 810 memcpy(mask, &req->cq_mask, 811 sizeof(struct nix_cq_ctx_s)); 812 else if (req->ctype == NIX_AQ_CTYPE_RSS) 813 memcpy(mask, &req->rss_mask, 814 sizeof(struct nix_rsse_s)); 815 else if (req->ctype == NIX_AQ_CTYPE_MCE) 816 memcpy(mask, &req->mce_mask, 817 sizeof(struct nix_rx_mce_s)); 818 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 819 memcpy(mask, &req->prof_mask, 820 sizeof(struct nix_bandprof_s)); 821 fallthrough; 822 case NIX_AQ_INSTOP_INIT: 823 if (req->ctype == NIX_AQ_CTYPE_RQ) 824 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 825 else if (req->ctype == NIX_AQ_CTYPE_SQ) 826 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 827 else if (req->ctype == NIX_AQ_CTYPE_CQ) 828 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 829 else if (req->ctype == NIX_AQ_CTYPE_RSS) 830 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 831 else if (req->ctype == NIX_AQ_CTYPE_MCE) 832 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 833 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 834 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 835 break; 836 case NIX_AQ_INSTOP_NOP: 837 case NIX_AQ_INSTOP_READ: 838 case NIX_AQ_INSTOP_LOCK: 839 case NIX_AQ_INSTOP_UNLOCK: 840 break; 841 default: 842 rc = NIX_AF_ERR_AQ_ENQUEUE; 843 spin_unlock(&aq->lock); 844 return rc; 845 } 846 847 /* Submit the instruction to AQ */ 848 rc = nix_aq_enqueue_wait(rvu, block, &inst); 849 if (rc) { 850 spin_unlock(&aq->lock); 851 return rc; 852 } 853 854 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ 855 if (req->op == NIX_AQ_INSTOP_INIT) { 856 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) 857 __set_bit(req->qidx, pfvf->rq_bmap); 858 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) 859 __set_bit(req->qidx, pfvf->sq_bmap); 860 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) 861 __set_bit(req->qidx, pfvf->cq_bmap); 862 } 863 864 if (req->op == NIX_AQ_INSTOP_WRITE) { 865 if (req->ctype == NIX_AQ_CTYPE_RQ) { 866 ena = (req->rq.ena & req->rq_mask.ena) | 867 (test_bit(req->qidx, pfvf->rq_bmap) & 868 ~req->rq_mask.ena); 869 if (ena) 870 __set_bit(req->qidx, pfvf->rq_bmap); 871 else 872 __clear_bit(req->qidx, pfvf->rq_bmap); 873 } 874 if (req->ctype == NIX_AQ_CTYPE_SQ) { 875 ena = (req->rq.ena & req->sq_mask.ena) | 876 (test_bit(req->qidx, pfvf->sq_bmap) & 877 ~req->sq_mask.ena); 878 if (ena) 879 __set_bit(req->qidx, pfvf->sq_bmap); 880 else 881 __clear_bit(req->qidx, pfvf->sq_bmap); 882 } 883 if (req->ctype == NIX_AQ_CTYPE_CQ) { 884 ena = (req->rq.ena & req->cq_mask.ena) | 885 (test_bit(req->qidx, pfvf->cq_bmap) & 886 ~req->cq_mask.ena); 887 if (ena) 888 __set_bit(req->qidx, pfvf->cq_bmap); 889 else 890 __clear_bit(req->qidx, pfvf->cq_bmap); 891 } 892 } 893 894 if (rsp) { 895 /* Copy read context into mailbox */ 896 if (req->op == NIX_AQ_INSTOP_READ) { 897 if (req->ctype == NIX_AQ_CTYPE_RQ) 898 memcpy(&rsp->rq, ctx, 899 sizeof(struct nix_rq_ctx_s)); 900 else if (req->ctype == NIX_AQ_CTYPE_SQ) 901 memcpy(&rsp->sq, ctx, 902 sizeof(struct nix_sq_ctx_s)); 903 else if (req->ctype == NIX_AQ_CTYPE_CQ) 904 memcpy(&rsp->cq, ctx, 905 sizeof(struct nix_cq_ctx_s)); 906 else if (req->ctype == NIX_AQ_CTYPE_RSS) 907 memcpy(&rsp->rss, ctx, 908 sizeof(struct nix_rsse_s)); 909 else if (req->ctype == NIX_AQ_CTYPE_MCE) 910 memcpy(&rsp->mce, ctx, 911 sizeof(struct nix_rx_mce_s)); 912 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 913 memcpy(&rsp->prof, ctx, 914 sizeof(struct nix_bandprof_s)); 915 } 916 } 917 918 spin_unlock(&aq->lock); 919 return 0; 920 } 921 922 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 923 struct nix_aq_enq_rsp *rsp) 924 { 925 struct nix_hw *nix_hw; 926 int blkaddr; 927 928 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); 929 if (blkaddr < 0) 930 return NIX_AF_ERR_AF_LF_INVALID; 931 932 nix_hw = get_nix_hw(rvu->hw, blkaddr); 933 if (!nix_hw) 934 return -EINVAL; 935 936 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); 937 } 938 939 static const char *nix_get_ctx_name(int ctype) 940 { 941 switch (ctype) { 942 case NIX_AQ_CTYPE_CQ: 943 return "CQ"; 944 case NIX_AQ_CTYPE_SQ: 945 return "SQ"; 946 case NIX_AQ_CTYPE_RQ: 947 return "RQ"; 948 case NIX_AQ_CTYPE_RSS: 949 return "RSS"; 950 } 951 return ""; 952 } 953 954 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) 955 { 956 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); 957 struct nix_aq_enq_req aq_req; 958 unsigned long *bmap; 959 int qidx, q_cnt = 0; 960 int err = 0, rc; 961 962 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) 963 return NIX_AF_ERR_AQ_ENQUEUE; 964 965 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); 966 aq_req.hdr.pcifunc = req->hdr.pcifunc; 967 968 if (req->ctype == NIX_AQ_CTYPE_CQ) { 969 aq_req.cq.ena = 0; 970 aq_req.cq_mask.ena = 1; 971 aq_req.cq.bp_ena = 0; 972 aq_req.cq_mask.bp_ena = 1; 973 q_cnt = pfvf->cq_ctx->qsize; 974 bmap = pfvf->cq_bmap; 975 } 976 if (req->ctype == NIX_AQ_CTYPE_SQ) { 977 aq_req.sq.ena = 0; 978 aq_req.sq_mask.ena = 1; 979 q_cnt = pfvf->sq_ctx->qsize; 980 bmap = pfvf->sq_bmap; 981 } 982 if (req->ctype == NIX_AQ_CTYPE_RQ) { 983 aq_req.rq.ena = 0; 984 aq_req.rq_mask.ena = 1; 985 q_cnt = pfvf->rq_ctx->qsize; 986 bmap = pfvf->rq_bmap; 987 } 988 989 aq_req.ctype = req->ctype; 990 aq_req.op = NIX_AQ_INSTOP_WRITE; 991 992 for (qidx = 0; qidx < q_cnt; qidx++) { 993 if (!test_bit(qidx, bmap)) 994 continue; 995 aq_req.qidx = qidx; 996 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); 997 if (rc) { 998 err = rc; 999 dev_err(rvu->dev, "Failed to disable %s:%d context\n", 1000 nix_get_ctx_name(req->ctype), qidx); 1001 } 1002 } 1003 1004 return err; 1005 } 1006 1007 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 1008 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) 1009 { 1010 struct nix_aq_enq_req lock_ctx_req; 1011 int err; 1012 1013 if (req->op != NIX_AQ_INSTOP_INIT) 1014 return 0; 1015 1016 if (req->ctype == NIX_AQ_CTYPE_MCE || 1017 req->ctype == NIX_AQ_CTYPE_DYNO) 1018 return 0; 1019 1020 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); 1021 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; 1022 lock_ctx_req.ctype = req->ctype; 1023 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; 1024 lock_ctx_req.qidx = req->qidx; 1025 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); 1026 if (err) 1027 dev_err(rvu->dev, 1028 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", 1029 req->hdr.pcifunc, 1030 nix_get_ctx_name(req->ctype), req->qidx); 1031 return err; 1032 } 1033 1034 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1035 struct nix_aq_enq_req *req, 1036 struct nix_aq_enq_rsp *rsp) 1037 { 1038 int err; 1039 1040 err = rvu_nix_aq_enq_inst(rvu, req, rsp); 1041 if (!err) 1042 err = nix_lf_hwctx_lockdown(rvu, req); 1043 return err; 1044 } 1045 #else 1046 1047 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, 1048 struct nix_aq_enq_req *req, 1049 struct nix_aq_enq_rsp *rsp) 1050 { 1051 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1052 } 1053 #endif 1054 /* CN10K mbox handler */ 1055 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1056 struct nix_cn10k_aq_enq_req *req, 1057 struct nix_cn10k_aq_enq_rsp *rsp) 1058 { 1059 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1060 (struct nix_aq_enq_rsp *)rsp); 1061 } 1062 1063 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1064 struct hwctx_disable_req *req, 1065 struct msg_rsp *rsp) 1066 { 1067 return nix_lf_hwctx_disable(rvu, req); 1068 } 1069 1070 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 1071 struct nix_lf_alloc_req *req, 1072 struct nix_lf_alloc_rsp *rsp) 1073 { 1074 int nixlf, qints, hwctx_size, intf, err, rc = 0; 1075 struct rvu_hwinfo *hw = rvu->hw; 1076 u16 pcifunc = req->hdr.pcifunc; 1077 struct rvu_block *block; 1078 struct rvu_pfvf *pfvf; 1079 u64 cfg, ctx_cfg; 1080 int blkaddr; 1081 1082 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) 1083 return NIX_AF_ERR_PARAM; 1084 1085 if (req->way_mask) 1086 req->way_mask &= 0xFFFF; 1087 1088 pfvf = rvu_get_pfvf(rvu, pcifunc); 1089 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1090 if (!pfvf->nixlf || blkaddr < 0) 1091 return NIX_AF_ERR_AF_LF_INVALID; 1092 1093 block = &hw->block[blkaddr]; 1094 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1095 if (nixlf < 0) 1096 return NIX_AF_ERR_AF_LF_INVALID; 1097 1098 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ 1099 if (req->npa_func) { 1100 /* If default, use 'this' NIXLF's PFFUNC */ 1101 if (req->npa_func == RVU_DEFAULT_PF_FUNC) 1102 req->npa_func = pcifunc; 1103 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) 1104 return NIX_AF_INVAL_NPA_PF_FUNC; 1105 } 1106 1107 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ 1108 if (req->sso_func) { 1109 /* If default, use 'this' NIXLF's PFFUNC */ 1110 if (req->sso_func == RVU_DEFAULT_PF_FUNC) 1111 req->sso_func = pcifunc; 1112 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) 1113 return NIX_AF_INVAL_SSO_PF_FUNC; 1114 } 1115 1116 /* If RSS is being enabled, check if requested config is valid. 1117 * RSS table size should be power of two, otherwise 1118 * RSS_GRP::OFFSET + adder might go beyond that group or 1119 * won't be able to use entire table. 1120 */ 1121 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || 1122 !is_power_of_2(req->rss_sz))) 1123 return NIX_AF_ERR_RSS_SIZE_INVALID; 1124 1125 if (req->rss_sz && 1126 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) 1127 return NIX_AF_ERR_RSS_GRPS_INVALID; 1128 1129 /* Reset this NIX LF */ 1130 err = rvu_lf_reset(rvu, block, nixlf); 1131 if (err) { 1132 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1133 block->addr - BLKADDR_NIX0, nixlf); 1134 return NIX_AF_ERR_LF_RESET; 1135 } 1136 1137 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); 1138 1139 /* Alloc NIX RQ HW context memory and config the base */ 1140 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); 1141 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); 1142 if (err) 1143 goto free_mem; 1144 1145 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); 1146 if (!pfvf->rq_bmap) 1147 goto free_mem; 1148 1149 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), 1150 (u64)pfvf->rq_ctx->iova); 1151 1152 /* Set caching and queue count in HW */ 1153 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; 1154 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); 1155 1156 /* Alloc NIX SQ HW context memory and config the base */ 1157 hwctx_size = 1UL << (ctx_cfg & 0xF); 1158 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); 1159 if (err) 1160 goto free_mem; 1161 1162 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); 1163 if (!pfvf->sq_bmap) 1164 goto free_mem; 1165 1166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), 1167 (u64)pfvf->sq_ctx->iova); 1168 1169 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; 1170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); 1171 1172 /* Alloc NIX CQ HW context memory and config the base */ 1173 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); 1174 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); 1175 if (err) 1176 goto free_mem; 1177 1178 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); 1179 if (!pfvf->cq_bmap) 1180 goto free_mem; 1181 1182 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), 1183 (u64)pfvf->cq_ctx->iova); 1184 1185 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; 1186 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); 1187 1188 /* Initialize receive side scaling (RSS) */ 1189 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); 1190 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, 1191 req->rss_grps, hwctx_size, req->way_mask); 1192 if (err) 1193 goto free_mem; 1194 1195 /* Alloc memory for CQINT's HW contexts */ 1196 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1197 qints = (cfg >> 24) & 0xFFF; 1198 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); 1199 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); 1200 if (err) 1201 goto free_mem; 1202 1203 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), 1204 (u64)pfvf->cq_ints_ctx->iova); 1205 1206 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), 1207 BIT_ULL(36) | req->way_mask << 20); 1208 1209 /* Alloc memory for QINT's HW contexts */ 1210 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1211 qints = (cfg >> 12) & 0xFFF; 1212 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); 1213 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); 1214 if (err) 1215 goto free_mem; 1216 1217 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), 1218 (u64)pfvf->nix_qints_ctx->iova); 1219 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), 1220 BIT_ULL(36) | req->way_mask << 20); 1221 1222 /* Setup VLANX TPID's. 1223 * Use VLAN1 for 802.1Q 1224 * and VLAN0 for 802.1AD. 1225 */ 1226 cfg = (0x8100ULL << 16) | 0x88A8ULL; 1227 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 1228 1229 /* Enable LMTST for this NIX LF */ 1230 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); 1231 1232 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ 1233 if (req->npa_func) 1234 cfg = req->npa_func; 1235 if (req->sso_func) 1236 cfg |= (u64)req->sso_func << 16; 1237 1238 cfg |= (u64)req->xqe_sz << 33; 1239 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); 1240 1241 /* Config Rx pkt length, csum checks and apad enable / disable */ 1242 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); 1243 1244 /* Configure pkind for TX parse config */ 1245 cfg = NPC_TX_DEF_PKIND; 1246 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); 1247 1248 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; 1249 err = nix_interface_init(rvu, pcifunc, intf, nixlf); 1250 if (err) 1251 goto free_mem; 1252 1253 /* Disable NPC entries as NIXLF's contexts are not initialized yet */ 1254 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1255 1256 /* Configure RX VTAG Type 7 (strip) for vf vlan */ 1257 rvu_write64(rvu, blkaddr, 1258 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), 1259 VTAGSIZE_T4 | VTAG_STRIP); 1260 1261 goto exit; 1262 1263 free_mem: 1264 nix_ctx_free(rvu, pfvf); 1265 rc = -ENOMEM; 1266 1267 exit: 1268 /* Set macaddr of this PF/VF */ 1269 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 1270 1271 /* set SQB size info */ 1272 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); 1273 rsp->sqb_size = (cfg >> 34) & 0xFFFF; 1274 rsp->rx_chan_base = pfvf->rx_chan_base; 1275 rsp->tx_chan_base = pfvf->tx_chan_base; 1276 rsp->rx_chan_cnt = pfvf->rx_chan_cnt; 1277 rsp->tx_chan_cnt = pfvf->tx_chan_cnt; 1278 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; 1279 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; 1280 /* Get HW supported stat count */ 1281 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 1282 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); 1283 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); 1284 /* Get count of CQ IRQs and error IRQs supported per LF */ 1285 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); 1286 rsp->qints = ((cfg >> 12) & 0xFFF); 1287 rsp->cints = ((cfg >> 24) & 0xFFF); 1288 rsp->cgx_links = hw->cgx_links; 1289 rsp->lbk_links = hw->lbk_links; 1290 rsp->sdp_links = hw->sdp_links; 1291 1292 return rc; 1293 } 1294 1295 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, 1296 struct msg_rsp *rsp) 1297 { 1298 struct rvu_hwinfo *hw = rvu->hw; 1299 u16 pcifunc = req->hdr.pcifunc; 1300 struct rvu_block *block; 1301 int blkaddr, nixlf, err; 1302 struct rvu_pfvf *pfvf; 1303 1304 pfvf = rvu_get_pfvf(rvu, pcifunc); 1305 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1306 if (!pfvf->nixlf || blkaddr < 0) 1307 return NIX_AF_ERR_AF_LF_INVALID; 1308 1309 block = &hw->block[blkaddr]; 1310 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 1311 if (nixlf < 0) 1312 return NIX_AF_ERR_AF_LF_INVALID; 1313 1314 if (req->flags & NIX_LF_DISABLE_FLOWS) 1315 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 1316 else 1317 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 1318 1319 /* Free any tx vtag def entries used by this NIX LF */ 1320 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) 1321 nix_free_tx_vtag_entries(rvu, pcifunc); 1322 1323 nix_interface_deinit(rvu, pcifunc, nixlf); 1324 1325 /* Reset this NIX LF */ 1326 err = rvu_lf_reset(rvu, block, nixlf); 1327 if (err) { 1328 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", 1329 block->addr - BLKADDR_NIX0, nixlf); 1330 return NIX_AF_ERR_LF_RESET; 1331 } 1332 1333 nix_ctx_free(rvu, pfvf); 1334 1335 return 0; 1336 } 1337 1338 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, 1339 struct nix_mark_format_cfg *req, 1340 struct nix_mark_format_cfg_rsp *rsp) 1341 { 1342 u16 pcifunc = req->hdr.pcifunc; 1343 struct nix_hw *nix_hw; 1344 struct rvu_pfvf *pfvf; 1345 int blkaddr, rc; 1346 u32 cfg; 1347 1348 pfvf = rvu_get_pfvf(rvu, pcifunc); 1349 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1350 if (!pfvf->nixlf || blkaddr < 0) 1351 return NIX_AF_ERR_AF_LF_INVALID; 1352 1353 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1354 if (!nix_hw) 1355 return -EINVAL; 1356 1357 cfg = (((u32)req->offset & 0x7) << 16) | 1358 (((u32)req->y_mask & 0xF) << 12) | 1359 (((u32)req->y_val & 0xF) << 8) | 1360 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); 1361 1362 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); 1363 if (rc < 0) { 1364 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", 1365 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 1366 return NIX_AF_ERR_MARK_CFG_FAIL; 1367 } 1368 1369 rsp->mark_format_idx = rc; 1370 return 0; 1371 } 1372 1373 /* Disable shaping of pkts by a scheduler queue 1374 * at a given scheduler level. 1375 */ 1376 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, 1377 int lvl, int schq) 1378 { 1379 u64 cir_reg = 0, pir_reg = 0; 1380 u64 cfg; 1381 1382 switch (lvl) { 1383 case NIX_TXSCH_LVL_TL1: 1384 cir_reg = NIX_AF_TL1X_CIR(schq); 1385 pir_reg = 0; /* PIR not available at TL1 */ 1386 break; 1387 case NIX_TXSCH_LVL_TL2: 1388 cir_reg = NIX_AF_TL2X_CIR(schq); 1389 pir_reg = NIX_AF_TL2X_PIR(schq); 1390 break; 1391 case NIX_TXSCH_LVL_TL3: 1392 cir_reg = NIX_AF_TL3X_CIR(schq); 1393 pir_reg = NIX_AF_TL3X_PIR(schq); 1394 break; 1395 case NIX_TXSCH_LVL_TL4: 1396 cir_reg = NIX_AF_TL4X_CIR(schq); 1397 pir_reg = NIX_AF_TL4X_PIR(schq); 1398 break; 1399 } 1400 1401 if (!cir_reg) 1402 return; 1403 cfg = rvu_read64(rvu, blkaddr, cir_reg); 1404 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); 1405 1406 if (!pir_reg) 1407 return; 1408 cfg = rvu_read64(rvu, blkaddr, pir_reg); 1409 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); 1410 } 1411 1412 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, 1413 int lvl, int schq) 1414 { 1415 struct rvu_hwinfo *hw = rvu->hw; 1416 int link; 1417 1418 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1419 return; 1420 1421 /* Reset TL4's SDP link config */ 1422 if (lvl == NIX_TXSCH_LVL_TL4) 1423 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); 1424 1425 if (lvl != NIX_TXSCH_LVL_TL2) 1426 return; 1427 1428 /* Reset TL2's CGX or LBK link config */ 1429 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) 1430 rvu_write64(rvu, blkaddr, 1431 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); 1432 } 1433 1434 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) 1435 { 1436 struct rvu_hwinfo *hw = rvu->hw; 1437 int pf = rvu_get_pf(pcifunc); 1438 u8 cgx_id = 0, lmac_id = 0; 1439 1440 if (is_afvf(pcifunc)) {/* LBK links */ 1441 return hw->cgx_links; 1442 } else if (is_pf_cgxmapped(rvu, pf)) { 1443 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1444 return (cgx_id * hw->lmac_per_cgx) + lmac_id; 1445 } 1446 1447 /* SDP link */ 1448 return hw->cgx_links + hw->lbk_links; 1449 } 1450 1451 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, 1452 int link, int *start, int *end) 1453 { 1454 struct rvu_hwinfo *hw = rvu->hw; 1455 int pf = rvu_get_pf(pcifunc); 1456 1457 if (is_afvf(pcifunc)) { /* LBK links */ 1458 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1459 *end = *start + hw->cap.nix_txsch_per_lbk_lmac; 1460 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ 1461 *start = hw->cap.nix_txsch_per_cgx_lmac * link; 1462 *end = *start + hw->cap.nix_txsch_per_cgx_lmac; 1463 } else { /* SDP link */ 1464 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + 1465 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); 1466 *end = *start + hw->cap.nix_txsch_per_sdp_lmac; 1467 } 1468 } 1469 1470 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, 1471 struct nix_hw *nix_hw, 1472 struct nix_txsch_alloc_req *req) 1473 { 1474 struct rvu_hwinfo *hw = rvu->hw; 1475 int schq, req_schq, free_cnt; 1476 struct nix_txsch *txsch; 1477 int link, start, end; 1478 1479 txsch = &nix_hw->txsch[lvl]; 1480 req_schq = req->schq_contig[lvl] + req->schq[lvl]; 1481 1482 if (!req_schq) 1483 return 0; 1484 1485 link = nix_get_tx_link(rvu, pcifunc); 1486 1487 /* For traffic aggregating scheduler level, one queue is enough */ 1488 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1489 if (req_schq != 1) 1490 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1491 return 0; 1492 } 1493 1494 /* Get free SCHQ count and check if request can be accomodated */ 1495 if (hw->cap.nix_fixed_txschq_mapping) { 1496 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1497 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); 1498 if (end <= txsch->schq.max && schq < end && 1499 !test_bit(schq, txsch->schq.bmap)) 1500 free_cnt = 1; 1501 else 1502 free_cnt = 0; 1503 } else { 1504 free_cnt = rvu_rsrc_free_count(&txsch->schq); 1505 } 1506 1507 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) 1508 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1509 1510 /* If contiguous queues are needed, check for availability */ 1511 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && 1512 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) 1513 return NIX_AF_ERR_TLX_ALLOC_FAIL; 1514 1515 return 0; 1516 } 1517 1518 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, 1519 struct nix_txsch_alloc_rsp *rsp, 1520 int lvl, int start, int end) 1521 { 1522 struct rvu_hwinfo *hw = rvu->hw; 1523 u16 pcifunc = rsp->hdr.pcifunc; 1524 int idx, schq; 1525 1526 /* For traffic aggregating levels, queue alloc is based 1527 * on transmit link to which PF_FUNC is mapped to. 1528 */ 1529 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1530 /* A single TL queue is allocated */ 1531 if (rsp->schq_contig[lvl]) { 1532 rsp->schq_contig[lvl] = 1; 1533 rsp->schq_contig_list[lvl][0] = start; 1534 } 1535 1536 /* Both contig and non-contig reqs doesn't make sense here */ 1537 if (rsp->schq_contig[lvl]) 1538 rsp->schq[lvl] = 0; 1539 1540 if (rsp->schq[lvl]) { 1541 rsp->schq[lvl] = 1; 1542 rsp->schq_list[lvl][0] = start; 1543 } 1544 return; 1545 } 1546 1547 /* Adjust the queue request count if HW supports 1548 * only one queue per level configuration. 1549 */ 1550 if (hw->cap.nix_fixed_txschq_mapping) { 1551 idx = pcifunc & RVU_PFVF_FUNC_MASK; 1552 schq = start + idx; 1553 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { 1554 rsp->schq_contig[lvl] = 0; 1555 rsp->schq[lvl] = 0; 1556 return; 1557 } 1558 1559 if (rsp->schq_contig[lvl]) { 1560 rsp->schq_contig[lvl] = 1; 1561 set_bit(schq, txsch->schq.bmap); 1562 rsp->schq_contig_list[lvl][0] = schq; 1563 rsp->schq[lvl] = 0; 1564 } else if (rsp->schq[lvl]) { 1565 rsp->schq[lvl] = 1; 1566 set_bit(schq, txsch->schq.bmap); 1567 rsp->schq_list[lvl][0] = schq; 1568 } 1569 return; 1570 } 1571 1572 /* Allocate contiguous queue indices requesty first */ 1573 if (rsp->schq_contig[lvl]) { 1574 schq = bitmap_find_next_zero_area(txsch->schq.bmap, 1575 txsch->schq.max, start, 1576 rsp->schq_contig[lvl], 0); 1577 if (schq >= end) 1578 rsp->schq_contig[lvl] = 0; 1579 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { 1580 set_bit(schq, txsch->schq.bmap); 1581 rsp->schq_contig_list[lvl][idx] = schq; 1582 schq++; 1583 } 1584 } 1585 1586 /* Allocate non-contiguous queue indices */ 1587 if (rsp->schq[lvl]) { 1588 idx = 0; 1589 for (schq = start; schq < end; schq++) { 1590 if (!test_bit(schq, txsch->schq.bmap)) { 1591 set_bit(schq, txsch->schq.bmap); 1592 rsp->schq_list[lvl][idx++] = schq; 1593 } 1594 if (idx == rsp->schq[lvl]) 1595 break; 1596 } 1597 /* Update how many were allocated */ 1598 rsp->schq[lvl] = idx; 1599 } 1600 } 1601 1602 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, 1603 struct nix_txsch_alloc_req *req, 1604 struct nix_txsch_alloc_rsp *rsp) 1605 { 1606 struct rvu_hwinfo *hw = rvu->hw; 1607 u16 pcifunc = req->hdr.pcifunc; 1608 int link, blkaddr, rc = 0; 1609 int lvl, idx, start, end; 1610 struct nix_txsch *txsch; 1611 struct rvu_pfvf *pfvf; 1612 struct nix_hw *nix_hw; 1613 u32 *pfvf_map; 1614 u16 schq; 1615 1616 pfvf = rvu_get_pfvf(rvu, pcifunc); 1617 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1618 if (!pfvf->nixlf || blkaddr < 0) 1619 return NIX_AF_ERR_AF_LF_INVALID; 1620 1621 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1622 if (!nix_hw) 1623 return -EINVAL; 1624 1625 mutex_lock(&rvu->rsrc_lock); 1626 1627 /* Check if request is valid as per HW capabilities 1628 * and can be accomodated. 1629 */ 1630 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1631 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); 1632 if (rc) 1633 goto err; 1634 } 1635 1636 /* Allocate requested Tx scheduler queues */ 1637 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1638 txsch = &nix_hw->txsch[lvl]; 1639 pfvf_map = txsch->pfvf_map; 1640 1641 if (!req->schq[lvl] && !req->schq_contig[lvl]) 1642 continue; 1643 1644 rsp->schq[lvl] = req->schq[lvl]; 1645 rsp->schq_contig[lvl] = req->schq_contig[lvl]; 1646 1647 link = nix_get_tx_link(rvu, pcifunc); 1648 1649 if (lvl >= hw->cap.nix_tx_aggr_lvl) { 1650 start = link; 1651 end = link; 1652 } else if (hw->cap.nix_fixed_txschq_mapping) { 1653 nix_get_txschq_range(rvu, pcifunc, link, &start, &end); 1654 } else { 1655 start = 0; 1656 end = txsch->schq.max; 1657 } 1658 1659 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); 1660 1661 /* Reset queue config */ 1662 for (idx = 0; idx < req->schq_contig[lvl]; idx++) { 1663 schq = rsp->schq_contig_list[lvl][idx]; 1664 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1665 NIX_TXSCHQ_CFG_DONE)) 1666 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1667 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1668 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1669 } 1670 1671 for (idx = 0; idx < req->schq[lvl]; idx++) { 1672 schq = rsp->schq_list[lvl][idx]; 1673 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & 1674 NIX_TXSCHQ_CFG_DONE)) 1675 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); 1676 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1677 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); 1678 } 1679 } 1680 1681 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; 1682 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; 1683 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, 1684 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 1685 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; 1686 goto exit; 1687 err: 1688 rc = NIX_AF_ERR_TLX_ALLOC_FAIL; 1689 exit: 1690 mutex_unlock(&rvu->rsrc_lock); 1691 return rc; 1692 } 1693 1694 static void nix_smq_flush(struct rvu *rvu, int blkaddr, 1695 int smq, u16 pcifunc, int nixlf) 1696 { 1697 int pf = rvu_get_pf(pcifunc); 1698 u8 cgx_id = 0, lmac_id = 0; 1699 int err, restore_tx_en = 0; 1700 u64 cfg; 1701 1702 /* enable cgx tx if disabled */ 1703 if (is_pf_cgxmapped(rvu, pf)) { 1704 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1705 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), 1706 lmac_id, true); 1707 } 1708 1709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); 1710 /* Do SMQ flush and set enqueue xoff */ 1711 cfg |= BIT_ULL(50) | BIT_ULL(49); 1712 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); 1713 1714 /* Disable backpressure from physical link, 1715 * otherwise SMQ flush may stall. 1716 */ 1717 rvu_cgx_enadis_rx_bp(rvu, pf, false); 1718 1719 /* Wait for flush to complete */ 1720 err = rvu_poll_reg(rvu, blkaddr, 1721 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); 1722 if (err) 1723 dev_err(rvu->dev, 1724 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); 1725 1726 rvu_cgx_enadis_rx_bp(rvu, pf, true); 1727 /* restore cgx tx state */ 1728 if (restore_tx_en) 1729 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); 1730 } 1731 1732 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) 1733 { 1734 int blkaddr, nixlf, lvl, schq, err; 1735 struct rvu_hwinfo *hw = rvu->hw; 1736 struct nix_txsch *txsch; 1737 struct nix_hw *nix_hw; 1738 1739 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1740 if (blkaddr < 0) 1741 return NIX_AF_ERR_AF_LF_INVALID; 1742 1743 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1744 if (!nix_hw) 1745 return -EINVAL; 1746 1747 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1748 if (nixlf < 0) 1749 return NIX_AF_ERR_AF_LF_INVALID; 1750 1751 /* Disable TL2/3 queue links before SMQ flush*/ 1752 mutex_lock(&rvu->rsrc_lock); 1753 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1754 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) 1755 continue; 1756 1757 txsch = &nix_hw->txsch[lvl]; 1758 for (schq = 0; schq < txsch->schq.max; schq++) { 1759 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1760 continue; 1761 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); 1762 } 1763 } 1764 1765 /* Flush SMQs */ 1766 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 1767 for (schq = 0; schq < txsch->schq.max; schq++) { 1768 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1769 continue; 1770 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1771 } 1772 1773 /* Now free scheduler queues to free pool */ 1774 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1775 /* TLs above aggregation level are shared across all PF 1776 * and it's VFs, hence skip freeing them. 1777 */ 1778 if (lvl >= hw->cap.nix_tx_aggr_lvl) 1779 continue; 1780 1781 txsch = &nix_hw->txsch[lvl]; 1782 for (schq = 0; schq < txsch->schq.max; schq++) { 1783 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1784 continue; 1785 rvu_free_rsrc(&txsch->schq, schq); 1786 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1787 } 1788 } 1789 mutex_unlock(&rvu->rsrc_lock); 1790 1791 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ 1792 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); 1793 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 1794 if (err) 1795 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); 1796 1797 return 0; 1798 } 1799 1800 static int nix_txschq_free_one(struct rvu *rvu, 1801 struct nix_txsch_free_req *req) 1802 { 1803 struct rvu_hwinfo *hw = rvu->hw; 1804 u16 pcifunc = req->hdr.pcifunc; 1805 int lvl, schq, nixlf, blkaddr; 1806 struct nix_txsch *txsch; 1807 struct nix_hw *nix_hw; 1808 u32 *pfvf_map; 1809 1810 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 1811 if (blkaddr < 0) 1812 return NIX_AF_ERR_AF_LF_INVALID; 1813 1814 nix_hw = get_nix_hw(rvu->hw, blkaddr); 1815 if (!nix_hw) 1816 return -EINVAL; 1817 1818 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 1819 if (nixlf < 0) 1820 return NIX_AF_ERR_AF_LF_INVALID; 1821 1822 lvl = req->schq_lvl; 1823 schq = req->schq; 1824 txsch = &nix_hw->txsch[lvl]; 1825 1826 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) 1827 return 0; 1828 1829 pfvf_map = txsch->pfvf_map; 1830 mutex_lock(&rvu->rsrc_lock); 1831 1832 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { 1833 mutex_unlock(&rvu->rsrc_lock); 1834 goto err; 1835 } 1836 1837 /* Flush if it is a SMQ. Onus of disabling 1838 * TL2/3 queue links before SMQ flush is on user 1839 */ 1840 if (lvl == NIX_TXSCH_LVL_SMQ) 1841 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 1842 1843 /* Free the resource */ 1844 rvu_free_rsrc(&txsch->schq, schq); 1845 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 1846 mutex_unlock(&rvu->rsrc_lock); 1847 return 0; 1848 err: 1849 return NIX_AF_ERR_TLX_INVALID; 1850 } 1851 1852 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, 1853 struct nix_txsch_free_req *req, 1854 struct msg_rsp *rsp) 1855 { 1856 if (req->flags & TXSCHQ_FREE_ALL) 1857 return nix_txschq_free(rvu, req->hdr.pcifunc); 1858 else 1859 return nix_txschq_free_one(rvu, req); 1860 } 1861 1862 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, 1863 int lvl, u64 reg, u64 regval) 1864 { 1865 u64 regbase = reg & 0xFFFF; 1866 u16 schq, parent; 1867 1868 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) 1869 return false; 1870 1871 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 1872 /* Check if this schq belongs to this PF/VF or not */ 1873 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) 1874 return false; 1875 1876 parent = (regval >> 16) & 0x1FF; 1877 /* Validate MDQ's TL4 parent */ 1878 if (regbase == NIX_AF_MDQX_PARENT(0) && 1879 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) 1880 return false; 1881 1882 /* Validate TL4's TL3 parent */ 1883 if (regbase == NIX_AF_TL4X_PARENT(0) && 1884 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) 1885 return false; 1886 1887 /* Validate TL3's TL2 parent */ 1888 if (regbase == NIX_AF_TL3X_PARENT(0) && 1889 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) 1890 return false; 1891 1892 /* Validate TL2's TL1 parent */ 1893 if (regbase == NIX_AF_TL2X_PARENT(0) && 1894 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) 1895 return false; 1896 1897 return true; 1898 } 1899 1900 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) 1901 { 1902 u64 regbase; 1903 1904 if (hw->cap.nix_shaping) 1905 return true; 1906 1907 /* If shaping and coloring is not supported, then 1908 * *_CIR and *_PIR registers should not be configured. 1909 */ 1910 regbase = reg & 0xFFFF; 1911 1912 switch (lvl) { 1913 case NIX_TXSCH_LVL_TL1: 1914 if (regbase == NIX_AF_TL1X_CIR(0)) 1915 return false; 1916 break; 1917 case NIX_TXSCH_LVL_TL2: 1918 if (regbase == NIX_AF_TL2X_CIR(0) || 1919 regbase == NIX_AF_TL2X_PIR(0)) 1920 return false; 1921 break; 1922 case NIX_TXSCH_LVL_TL3: 1923 if (regbase == NIX_AF_TL3X_CIR(0) || 1924 regbase == NIX_AF_TL3X_PIR(0)) 1925 return false; 1926 break; 1927 case NIX_TXSCH_LVL_TL4: 1928 if (regbase == NIX_AF_TL4X_CIR(0) || 1929 regbase == NIX_AF_TL4X_PIR(0)) 1930 return false; 1931 break; 1932 } 1933 return true; 1934 } 1935 1936 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, 1937 u16 pcifunc, int blkaddr) 1938 { 1939 u32 *pfvf_map; 1940 int schq; 1941 1942 schq = nix_get_tx_link(rvu, pcifunc); 1943 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; 1944 /* Skip if PF has already done the config */ 1945 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) 1946 return; 1947 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), 1948 (TXSCH_TL1_DFLT_RR_PRIO << 1)); 1949 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), 1950 TXSCH_TL1_DFLT_RR_QTM); 1951 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); 1952 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); 1953 } 1954 1955 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, 1956 u16 pcifunc, struct nix_txsch *txsch) 1957 { 1958 struct rvu_hwinfo *hw = rvu->hw; 1959 int lbk_link_start, lbk_links; 1960 u8 pf = rvu_get_pf(pcifunc); 1961 int schq; 1962 1963 if (!is_pf_cgxmapped(rvu, pf)) 1964 return; 1965 1966 lbk_link_start = hw->cgx_links; 1967 1968 for (schq = 0; schq < txsch->schq.max; schq++) { 1969 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 1970 continue; 1971 /* Enable all LBK links with channel 63 by default so that 1972 * packets can be sent to LBK with a NPC TX MCAM rule 1973 */ 1974 lbk_links = hw->lbk_links; 1975 while (lbk_links--) 1976 rvu_write64(rvu, blkaddr, 1977 NIX_AF_TL3_TL2X_LINKX_CFG(schq, 1978 lbk_link_start + 1979 lbk_links), 1980 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); 1981 } 1982 } 1983 1984 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, 1985 struct nix_txschq_config *req, 1986 struct msg_rsp *rsp) 1987 { 1988 struct rvu_hwinfo *hw = rvu->hw; 1989 u16 pcifunc = req->hdr.pcifunc; 1990 u64 reg, regval, schq_regbase; 1991 struct nix_txsch *txsch; 1992 struct nix_hw *nix_hw; 1993 int blkaddr, idx, err; 1994 int nixlf, schq; 1995 u32 *pfvf_map; 1996 1997 if (req->lvl >= NIX_TXSCH_LVL_CNT || 1998 req->num_regs > MAX_REGS_PER_MBOX_MSG) 1999 return NIX_AF_INVAL_TXSCHQ_CFG; 2000 2001 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2002 if (err) 2003 return err; 2004 2005 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2006 if (!nix_hw) 2007 return -EINVAL; 2008 2009 txsch = &nix_hw->txsch[req->lvl]; 2010 pfvf_map = txsch->pfvf_map; 2011 2012 if (req->lvl >= hw->cap.nix_tx_aggr_lvl && 2013 pcifunc & RVU_PFVF_FUNC_MASK) { 2014 mutex_lock(&rvu->rsrc_lock); 2015 if (req->lvl == NIX_TXSCH_LVL_TL1) 2016 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); 2017 mutex_unlock(&rvu->rsrc_lock); 2018 return 0; 2019 } 2020 2021 for (idx = 0; idx < req->num_regs; idx++) { 2022 reg = req->reg[idx]; 2023 regval = req->regval[idx]; 2024 schq_regbase = reg & 0xFFFF; 2025 2026 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 2027 txsch->lvl, reg, regval)) 2028 return NIX_AF_INVAL_TXSCHQ_CFG; 2029 2030 /* Check if shaping and coloring is supported */ 2031 if (!is_txschq_shaping_valid(hw, req->lvl, reg)) 2032 continue; 2033 2034 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ 2035 if (schq_regbase == NIX_AF_SMQX_CFG(0)) { 2036 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], 2037 pcifunc, 0); 2038 regval &= ~(0x7FULL << 24); 2039 regval |= ((u64)nixlf << 24); 2040 } 2041 2042 /* Clear 'BP_ENA' config, if it's not allowed */ 2043 if (!hw->cap.nix_tx_link_bp) { 2044 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || 2045 (schq_regbase & 0xFF00) == 2046 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) 2047 regval &= ~BIT_ULL(13); 2048 } 2049 2050 /* Mark config as done for TL1 by PF */ 2051 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && 2052 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { 2053 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2054 mutex_lock(&rvu->rsrc_lock); 2055 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], 2056 NIX_TXSCHQ_CFG_DONE); 2057 mutex_unlock(&rvu->rsrc_lock); 2058 } 2059 2060 /* SMQ flush is special hence split register writes such 2061 * that flush first and write rest of the bits later. 2062 */ 2063 if (schq_regbase == NIX_AF_SMQX_CFG(0) && 2064 (regval & BIT_ULL(49))) { 2065 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); 2066 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); 2067 regval &= ~BIT_ULL(49); 2068 } 2069 rvu_write64(rvu, blkaddr, reg, regval); 2070 } 2071 2072 rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, 2073 &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); 2074 2075 return 0; 2076 } 2077 2078 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, 2079 struct nix_vtag_config *req) 2080 { 2081 u64 regval = req->vtag_size; 2082 2083 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || 2084 req->vtag_size > VTAGSIZE_T8) 2085 return -EINVAL; 2086 2087 /* RX VTAG Type 7 reserved for vf vlan */ 2088 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) 2089 return NIX_AF_ERR_RX_VTAG_INUSE; 2090 2091 if (req->rx.capture_vtag) 2092 regval |= BIT_ULL(5); 2093 if (req->rx.strip_vtag) 2094 regval |= BIT_ULL(4); 2095 2096 rvu_write64(rvu, blkaddr, 2097 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); 2098 return 0; 2099 } 2100 2101 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, 2102 u16 pcifunc, int index) 2103 { 2104 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2105 struct nix_txvlan *vlan = &nix_hw->txvlan; 2106 2107 if (vlan->entry2pfvf_map[index] != pcifunc) 2108 return NIX_AF_ERR_PARAM; 2109 2110 rvu_write64(rvu, blkaddr, 2111 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); 2112 rvu_write64(rvu, blkaddr, 2113 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); 2114 2115 vlan->entry2pfvf_map[index] = 0; 2116 rvu_free_rsrc(&vlan->rsrc, index); 2117 2118 return 0; 2119 } 2120 2121 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) 2122 { 2123 struct nix_txvlan *vlan; 2124 struct nix_hw *nix_hw; 2125 int index, blkaddr; 2126 2127 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2128 if (blkaddr < 0) 2129 return; 2130 2131 nix_hw = get_nix_hw(rvu->hw, blkaddr); 2132 vlan = &nix_hw->txvlan; 2133 2134 mutex_lock(&vlan->rsrc_lock); 2135 /* Scan all the entries and free the ones mapped to 'pcifunc' */ 2136 for (index = 0; index < vlan->rsrc.max; index++) { 2137 if (vlan->entry2pfvf_map[index] == pcifunc) 2138 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); 2139 } 2140 mutex_unlock(&vlan->rsrc_lock); 2141 } 2142 2143 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, 2144 u64 vtag, u8 size) 2145 { 2146 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2147 struct nix_txvlan *vlan = &nix_hw->txvlan; 2148 u64 regval; 2149 int index; 2150 2151 mutex_lock(&vlan->rsrc_lock); 2152 2153 index = rvu_alloc_rsrc(&vlan->rsrc); 2154 if (index < 0) { 2155 mutex_unlock(&vlan->rsrc_lock); 2156 return index; 2157 } 2158 2159 mutex_unlock(&vlan->rsrc_lock); 2160 2161 regval = size ? vtag : vtag << 32; 2162 2163 rvu_write64(rvu, blkaddr, 2164 NIX_AF_TX_VTAG_DEFX_DATA(index), regval); 2165 rvu_write64(rvu, blkaddr, 2166 NIX_AF_TX_VTAG_DEFX_CTL(index), size); 2167 2168 return index; 2169 } 2170 2171 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, 2172 struct nix_vtag_config *req) 2173 { 2174 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2175 struct nix_txvlan *vlan = &nix_hw->txvlan; 2176 u16 pcifunc = req->hdr.pcifunc; 2177 int idx0 = req->tx.vtag0_idx; 2178 int idx1 = req->tx.vtag1_idx; 2179 int err = 0; 2180 2181 if (req->tx.free_vtag0 && req->tx.free_vtag1) 2182 if (vlan->entry2pfvf_map[idx0] != pcifunc || 2183 vlan->entry2pfvf_map[idx1] != pcifunc) 2184 return NIX_AF_ERR_PARAM; 2185 2186 mutex_lock(&vlan->rsrc_lock); 2187 2188 if (req->tx.free_vtag0) { 2189 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); 2190 if (err) 2191 goto exit; 2192 } 2193 2194 if (req->tx.free_vtag1) 2195 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); 2196 2197 exit: 2198 mutex_unlock(&vlan->rsrc_lock); 2199 return err; 2200 } 2201 2202 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, 2203 struct nix_vtag_config *req, 2204 struct nix_vtag_config_rsp *rsp) 2205 { 2206 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); 2207 struct nix_txvlan *vlan = &nix_hw->txvlan; 2208 u16 pcifunc = req->hdr.pcifunc; 2209 2210 if (req->tx.cfg_vtag0) { 2211 rsp->vtag0_idx = 2212 nix_tx_vtag_alloc(rvu, blkaddr, 2213 req->tx.vtag0, req->vtag_size); 2214 2215 if (rsp->vtag0_idx < 0) 2216 return NIX_AF_ERR_TX_VTAG_NOSPC; 2217 2218 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; 2219 } 2220 2221 if (req->tx.cfg_vtag1) { 2222 rsp->vtag1_idx = 2223 nix_tx_vtag_alloc(rvu, blkaddr, 2224 req->tx.vtag1, req->vtag_size); 2225 2226 if (rsp->vtag1_idx < 0) 2227 goto err_free; 2228 2229 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; 2230 } 2231 2232 return 0; 2233 2234 err_free: 2235 if (req->tx.cfg_vtag0) 2236 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); 2237 2238 return NIX_AF_ERR_TX_VTAG_NOSPC; 2239 } 2240 2241 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, 2242 struct nix_vtag_config *req, 2243 struct nix_vtag_config_rsp *rsp) 2244 { 2245 u16 pcifunc = req->hdr.pcifunc; 2246 int blkaddr, nixlf, err; 2247 2248 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2249 if (err) 2250 return err; 2251 2252 if (req->cfg_type) { 2253 /* rx vtag configuration */ 2254 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); 2255 if (err) 2256 return NIX_AF_ERR_PARAM; 2257 } else { 2258 /* tx vtag configuration */ 2259 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && 2260 (req->tx.free_vtag0 || req->tx.free_vtag1)) 2261 return NIX_AF_ERR_PARAM; 2262 2263 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) 2264 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); 2265 2266 if (req->tx.free_vtag0 || req->tx.free_vtag1) 2267 return nix_tx_vtag_decfg(rvu, blkaddr, req); 2268 } 2269 2270 return 0; 2271 } 2272 2273 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, 2274 int mce, u8 op, u16 pcifunc, int next, bool eol) 2275 { 2276 struct nix_aq_enq_req aq_req; 2277 int err; 2278 2279 aq_req.hdr.pcifunc = 0; 2280 aq_req.ctype = NIX_AQ_CTYPE_MCE; 2281 aq_req.op = op; 2282 aq_req.qidx = mce; 2283 2284 /* Use RSS with RSS index 0 */ 2285 aq_req.mce.op = 1; 2286 aq_req.mce.index = 0; 2287 aq_req.mce.eol = eol; 2288 aq_req.mce.pf_func = pcifunc; 2289 aq_req.mce.next = next; 2290 2291 /* All fields valid */ 2292 *(u64 *)(&aq_req.mce_mask) = ~0ULL; 2293 2294 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); 2295 if (err) { 2296 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", 2297 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); 2298 return err; 2299 } 2300 return 0; 2301 } 2302 2303 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, 2304 u16 pcifunc, bool add) 2305 { 2306 struct mce *mce, *tail = NULL; 2307 bool delete = false; 2308 2309 /* Scan through the current list */ 2310 hlist_for_each_entry(mce, &mce_list->head, node) { 2311 /* If already exists, then delete */ 2312 if (mce->pcifunc == pcifunc && !add) { 2313 delete = true; 2314 break; 2315 } else if (mce->pcifunc == pcifunc && add) { 2316 /* entry already exists */ 2317 return 0; 2318 } 2319 tail = mce; 2320 } 2321 2322 if (delete) { 2323 hlist_del(&mce->node); 2324 kfree(mce); 2325 mce_list->count--; 2326 return 0; 2327 } 2328 2329 if (!add) 2330 return 0; 2331 2332 /* Add a new one to the list, at the tail */ 2333 mce = kzalloc(sizeof(*mce), GFP_KERNEL); 2334 if (!mce) 2335 return -ENOMEM; 2336 mce->pcifunc = pcifunc; 2337 if (!tail) 2338 hlist_add_head(&mce->node, &mce_list->head); 2339 else 2340 hlist_add_behind(&mce->node, &tail->node); 2341 mce_list->count++; 2342 return 0; 2343 } 2344 2345 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, 2346 struct nix_mce_list *mce_list, 2347 int mce_idx, int mcam_index, bool add) 2348 { 2349 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; 2350 struct npc_mcam *mcam = &rvu->hw->mcam; 2351 struct nix_mcast *mcast; 2352 struct nix_hw *nix_hw; 2353 struct mce *mce; 2354 2355 if (!mce_list) 2356 return -EINVAL; 2357 2358 /* Get this PF/VF func's MCE index */ 2359 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); 2360 2361 if (idx > (mce_idx + mce_list->max)) { 2362 dev_err(rvu->dev, 2363 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", 2364 __func__, idx, mce_list->max, 2365 pcifunc >> RVU_PFVF_PF_SHIFT); 2366 return -EINVAL; 2367 } 2368 2369 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 2370 if (err) 2371 return err; 2372 2373 mcast = &nix_hw->mcast; 2374 mutex_lock(&mcast->mce_lock); 2375 2376 err = nix_update_mce_list_entry(mce_list, pcifunc, add); 2377 if (err) 2378 goto end; 2379 2380 /* Disable MCAM entry in NPC */ 2381 if (!mce_list->count) { 2382 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2383 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); 2384 goto end; 2385 } 2386 2387 /* Dump the updated list to HW */ 2388 idx = mce_idx; 2389 last_idx = idx + mce_list->count - 1; 2390 hlist_for_each_entry(mce, &mce_list->head, node) { 2391 if (idx > last_idx) 2392 break; 2393 2394 next_idx = idx + 1; 2395 /* EOL should be set in last MCE */ 2396 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, 2397 mce->pcifunc, next_idx, 2398 (next_idx > last_idx) ? true : false); 2399 if (err) 2400 goto end; 2401 idx++; 2402 } 2403 2404 end: 2405 mutex_unlock(&mcast->mce_lock); 2406 return err; 2407 } 2408 2409 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, 2410 struct nix_mce_list **mce_list, int *mce_idx) 2411 { 2412 struct rvu_hwinfo *hw = rvu->hw; 2413 struct rvu_pfvf *pfvf; 2414 2415 if (!hw->cap.nix_rx_multicast || 2416 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { 2417 *mce_list = NULL; 2418 *mce_idx = 0; 2419 return; 2420 } 2421 2422 /* Get this PF/VF func's MCE index */ 2423 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); 2424 2425 if (type == NIXLF_BCAST_ENTRY) { 2426 *mce_list = &pfvf->bcast_mce_list; 2427 *mce_idx = pfvf->bcast_mce_idx; 2428 } else if (type == NIXLF_ALLMULTI_ENTRY) { 2429 *mce_list = &pfvf->mcast_mce_list; 2430 *mce_idx = pfvf->mcast_mce_idx; 2431 } else if (type == NIXLF_PROMISC_ENTRY) { 2432 *mce_list = &pfvf->promisc_mce_list; 2433 *mce_idx = pfvf->promisc_mce_idx; 2434 } else { 2435 *mce_list = NULL; 2436 *mce_idx = 0; 2437 } 2438 } 2439 2440 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, 2441 int type, bool add) 2442 { 2443 int err = 0, nixlf, blkaddr, mcam_index, mce_idx; 2444 struct npc_mcam *mcam = &rvu->hw->mcam; 2445 struct rvu_hwinfo *hw = rvu->hw; 2446 struct nix_mce_list *mce_list; 2447 2448 /* skip multicast pkt replication for AF's VFs */ 2449 if (is_afvf(pcifunc)) 2450 return 0; 2451 2452 if (!hw->cap.nix_rx_multicast) 2453 return 0; 2454 2455 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2456 if (blkaddr < 0) 2457 return -EINVAL; 2458 2459 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); 2460 if (nixlf < 0) 2461 return -EINVAL; 2462 2463 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); 2464 2465 mcam_index = npc_get_nixlf_mcam_index(mcam, 2466 pcifunc & ~RVU_PFVF_FUNC_MASK, 2467 nixlf, type); 2468 err = nix_update_mce_list(rvu, pcifunc, mce_list, 2469 mce_idx, mcam_index, add); 2470 return err; 2471 } 2472 2473 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) 2474 { 2475 struct nix_mcast *mcast = &nix_hw->mcast; 2476 int err, pf, numvfs, idx; 2477 struct rvu_pfvf *pfvf; 2478 u16 pcifunc; 2479 u64 cfg; 2480 2481 /* Skip PF0 (i.e AF) */ 2482 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { 2483 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); 2484 /* If PF is not enabled, nothing to do */ 2485 if (!((cfg >> 20) & 0x01)) 2486 continue; 2487 /* Get numVFs attached to this PF */ 2488 numvfs = (cfg >> 12) & 0xFF; 2489 2490 pfvf = &rvu->pf[pf]; 2491 2492 /* This NIX0/1 block mapped to PF ? */ 2493 if (pfvf->nix_blkaddr != nix_hw->blkaddr) 2494 continue; 2495 2496 /* save start idx of broadcast mce list */ 2497 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2498 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); 2499 2500 /* save start idx of multicast mce list */ 2501 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2502 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); 2503 2504 /* save the start idx of promisc mce list */ 2505 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); 2506 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); 2507 2508 for (idx = 0; idx < (numvfs + 1); idx++) { 2509 /* idx-0 is for PF, followed by VFs */ 2510 pcifunc = (pf << RVU_PFVF_PF_SHIFT); 2511 pcifunc |= idx; 2512 /* Add dummy entries now, so that we don't have to check 2513 * for whether AQ_OP should be INIT/WRITE later on. 2514 * Will be updated when a NIXLF is attached/detached to 2515 * these PF/VFs. 2516 */ 2517 err = nix_blk_setup_mce(rvu, nix_hw, 2518 pfvf->bcast_mce_idx + idx, 2519 NIX_AQ_INSTOP_INIT, 2520 pcifunc, 0, true); 2521 if (err) 2522 return err; 2523 2524 /* add dummy entries to multicast mce list */ 2525 err = nix_blk_setup_mce(rvu, nix_hw, 2526 pfvf->mcast_mce_idx + idx, 2527 NIX_AQ_INSTOP_INIT, 2528 pcifunc, 0, true); 2529 if (err) 2530 return err; 2531 2532 /* add dummy entries to promisc mce list */ 2533 err = nix_blk_setup_mce(rvu, nix_hw, 2534 pfvf->promisc_mce_idx + idx, 2535 NIX_AQ_INSTOP_INIT, 2536 pcifunc, 0, true); 2537 if (err) 2538 return err; 2539 } 2540 } 2541 return 0; 2542 } 2543 2544 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2545 { 2546 struct nix_mcast *mcast = &nix_hw->mcast; 2547 struct rvu_hwinfo *hw = rvu->hw; 2548 int err, size; 2549 2550 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; 2551 size = (1ULL << size); 2552 2553 /* Alloc memory for multicast/mirror replication entries */ 2554 err = qmem_alloc(rvu->dev, &mcast->mce_ctx, 2555 (256UL << MC_TBL_SIZE), size); 2556 if (err) 2557 return -ENOMEM; 2558 2559 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, 2560 (u64)mcast->mce_ctx->iova); 2561 2562 /* Set max list length equal to max no of VFs per PF + PF itself */ 2563 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, 2564 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); 2565 2566 /* Alloc memory for multicast replication buffers */ 2567 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; 2568 err = qmem_alloc(rvu->dev, &mcast->mcast_buf, 2569 (8UL << MC_BUF_CNT), size); 2570 if (err) 2571 return -ENOMEM; 2572 2573 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, 2574 (u64)mcast->mcast_buf->iova); 2575 2576 /* Alloc pkind for NIX internal RX multicast/mirror replay */ 2577 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); 2578 2579 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, 2580 BIT_ULL(63) | (mcast->replay_pkind << 24) | 2581 BIT_ULL(20) | MC_BUF_CNT); 2582 2583 mutex_init(&mcast->mce_lock); 2584 2585 return nix_setup_mce_tables(rvu, nix_hw); 2586 } 2587 2588 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) 2589 { 2590 struct nix_txvlan *vlan = &nix_hw->txvlan; 2591 int err; 2592 2593 /* Allocate resource bimap for tx vtag def registers*/ 2594 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; 2595 err = rvu_alloc_bitmap(&vlan->rsrc); 2596 if (err) 2597 return -ENOMEM; 2598 2599 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ 2600 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, 2601 sizeof(u16), GFP_KERNEL); 2602 if (!vlan->entry2pfvf_map) 2603 goto free_mem; 2604 2605 mutex_init(&vlan->rsrc_lock); 2606 return 0; 2607 2608 free_mem: 2609 kfree(vlan->rsrc.bmap); 2610 return -ENOMEM; 2611 } 2612 2613 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) 2614 { 2615 struct nix_txsch *txsch; 2616 int err, lvl, schq; 2617 u64 cfg, reg; 2618 2619 /* Get scheduler queue count of each type and alloc 2620 * bitmap for each for alloc/free/attach operations. 2621 */ 2622 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 2623 txsch = &nix_hw->txsch[lvl]; 2624 txsch->lvl = lvl; 2625 switch (lvl) { 2626 case NIX_TXSCH_LVL_SMQ: 2627 reg = NIX_AF_MDQ_CONST; 2628 break; 2629 case NIX_TXSCH_LVL_TL4: 2630 reg = NIX_AF_TL4_CONST; 2631 break; 2632 case NIX_TXSCH_LVL_TL3: 2633 reg = NIX_AF_TL3_CONST; 2634 break; 2635 case NIX_TXSCH_LVL_TL2: 2636 reg = NIX_AF_TL2_CONST; 2637 break; 2638 case NIX_TXSCH_LVL_TL1: 2639 reg = NIX_AF_TL1_CONST; 2640 break; 2641 } 2642 cfg = rvu_read64(rvu, blkaddr, reg); 2643 txsch->schq.max = cfg & 0xFFFF; 2644 err = rvu_alloc_bitmap(&txsch->schq); 2645 if (err) 2646 return err; 2647 2648 /* Allocate memory for scheduler queues to 2649 * PF/VF pcifunc mapping info. 2650 */ 2651 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, 2652 sizeof(u32), GFP_KERNEL); 2653 if (!txsch->pfvf_map) 2654 return -ENOMEM; 2655 for (schq = 0; schq < txsch->schq.max; schq++) 2656 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); 2657 } 2658 return 0; 2659 } 2660 2661 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, 2662 int blkaddr, u32 cfg) 2663 { 2664 int fmt_idx; 2665 2666 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { 2667 if (nix_hw->mark_format.cfg[fmt_idx] == cfg) 2668 return fmt_idx; 2669 } 2670 if (fmt_idx >= nix_hw->mark_format.total) 2671 return -ERANGE; 2672 2673 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); 2674 nix_hw->mark_format.cfg[fmt_idx] = cfg; 2675 nix_hw->mark_format.in_use++; 2676 return fmt_idx; 2677 } 2678 2679 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, 2680 int blkaddr) 2681 { 2682 u64 cfgs[] = { 2683 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, 2684 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, 2685 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, 2686 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, 2687 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, 2688 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, 2689 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, 2690 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, 2691 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, 2692 }; 2693 int i, rc; 2694 u64 total; 2695 2696 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; 2697 nix_hw->mark_format.total = (u8)total; 2698 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), 2699 GFP_KERNEL); 2700 if (!nix_hw->mark_format.cfg) 2701 return -ENOMEM; 2702 for (i = 0; i < NIX_MARK_CFG_MAX; i++) { 2703 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); 2704 if (rc < 0) 2705 dev_err(rvu->dev, "Err %d in setup mark format %d\n", 2706 i, rc); 2707 } 2708 2709 return 0; 2710 } 2711 2712 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2713 { 2714 /* CN10K supports LBK FIFO size 72 KB */ 2715 if (rvu->hw->lbk_bufsize == 0x12000) 2716 *max_mtu = CN10K_LBK_LINK_MAX_FRS; 2717 else 2718 *max_mtu = NIC_HW_MAX_FRS; 2719 } 2720 2721 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2722 { 2723 /* RPM supports FIFO len 128 KB */ 2724 if (rvu_cgx_get_fifolen(rvu) == 0x20000) 2725 *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 2726 else 2727 *max_mtu = NIC_HW_MAX_FRS; 2728 } 2729 2730 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 2731 struct nix_hw_info *rsp) 2732 { 2733 u16 pcifunc = req->hdr.pcifunc; 2734 int blkaddr; 2735 2736 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2737 if (blkaddr < 0) 2738 return NIX_AF_ERR_AF_LF_INVALID; 2739 2740 if (is_afvf(pcifunc)) 2741 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 2742 else 2743 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 2744 2745 rsp->min_mtu = NIC_HW_MIN_FRS; 2746 return 0; 2747 } 2748 2749 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, 2750 struct msg_rsp *rsp) 2751 { 2752 u16 pcifunc = req->hdr.pcifunc; 2753 int i, nixlf, blkaddr, err; 2754 u64 stats; 2755 2756 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 2757 if (err) 2758 return err; 2759 2760 /* Get stats count supported by HW */ 2761 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); 2762 2763 /* Reset tx stats */ 2764 for (i = 0; i < ((stats >> 24) & 0xFF); i++) 2765 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); 2766 2767 /* Reset rx stats */ 2768 for (i = 0; i < ((stats >> 32) & 0xFF); i++) 2769 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); 2770 2771 return 0; 2772 } 2773 2774 /* Returns the ALG index to be set into NPC_RX_ACTION */ 2775 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) 2776 { 2777 int i; 2778 2779 /* Scan over exiting algo entries to find a match */ 2780 for (i = 0; i < nix_hw->flowkey.in_use; i++) 2781 if (nix_hw->flowkey.flowkey[i] == flow_cfg) 2782 return i; 2783 2784 return -ERANGE; 2785 } 2786 2787 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) 2788 { 2789 int idx, nr_field, key_off, field_marker, keyoff_marker; 2790 int max_key_off, max_bit_pos, group_member; 2791 struct nix_rx_flowkey_alg *field; 2792 struct nix_rx_flowkey_alg tmp; 2793 u32 key_type, valid_key; 2794 int l4_key_offset = 0; 2795 2796 if (!alg) 2797 return -EINVAL; 2798 2799 #define FIELDS_PER_ALG 5 2800 #define MAX_KEY_OFF 40 2801 /* Clear all fields */ 2802 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); 2803 2804 /* Each of the 32 possible flow key algorithm definitions should 2805 * fall into above incremental config (except ALG0). Otherwise a 2806 * single NPC MCAM entry is not sufficient for supporting RSS. 2807 * 2808 * If a different definition or combination needed then NPC MCAM 2809 * has to be programmed to filter such pkts and it's action should 2810 * point to this definition to calculate flowtag or hash. 2811 * 2812 * The `for loop` goes over _all_ protocol field and the following 2813 * variables depicts the state machine forward progress logic. 2814 * 2815 * keyoff_marker - Enabled when hash byte length needs to be accounted 2816 * in field->key_offset update. 2817 * field_marker - Enabled when a new field needs to be selected. 2818 * group_member - Enabled when protocol is part of a group. 2819 */ 2820 2821 keyoff_marker = 0; max_key_off = 0; group_member = 0; 2822 nr_field = 0; key_off = 0; field_marker = 1; 2823 field = &tmp; max_bit_pos = fls(flow_cfg); 2824 for (idx = 0; 2825 idx < max_bit_pos && nr_field < FIELDS_PER_ALG && 2826 key_off < MAX_KEY_OFF; idx++) { 2827 key_type = BIT(idx); 2828 valid_key = flow_cfg & key_type; 2829 /* Found a field marker, reset the field values */ 2830 if (field_marker) 2831 memset(&tmp, 0, sizeof(tmp)); 2832 2833 field_marker = true; 2834 keyoff_marker = true; 2835 switch (key_type) { 2836 case NIX_FLOW_KEY_TYPE_PORT: 2837 field->sel_chan = true; 2838 /* This should be set to 1, when SEL_CHAN is set */ 2839 field->bytesm1 = 1; 2840 break; 2841 case NIX_FLOW_KEY_TYPE_IPV4_PROTO: 2842 field->lid = NPC_LID_LC; 2843 field->hdr_offset = 9; /* offset */ 2844 field->bytesm1 = 0; /* 1 byte */ 2845 field->ltype_match = NPC_LT_LC_IP; 2846 field->ltype_mask = 0xF; 2847 break; 2848 case NIX_FLOW_KEY_TYPE_IPV4: 2849 case NIX_FLOW_KEY_TYPE_INNR_IPV4: 2850 field->lid = NPC_LID_LC; 2851 field->ltype_match = NPC_LT_LC_IP; 2852 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { 2853 field->lid = NPC_LID_LG; 2854 field->ltype_match = NPC_LT_LG_TU_IP; 2855 } 2856 field->hdr_offset = 12; /* SIP offset */ 2857 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ 2858 field->ltype_mask = 0xF; /* Match only IPv4 */ 2859 keyoff_marker = false; 2860 break; 2861 case NIX_FLOW_KEY_TYPE_IPV6: 2862 case NIX_FLOW_KEY_TYPE_INNR_IPV6: 2863 field->lid = NPC_LID_LC; 2864 field->ltype_match = NPC_LT_LC_IP6; 2865 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { 2866 field->lid = NPC_LID_LG; 2867 field->ltype_match = NPC_LT_LG_TU_IP6; 2868 } 2869 field->hdr_offset = 8; /* SIP offset */ 2870 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ 2871 field->ltype_mask = 0xF; /* Match only IPv6 */ 2872 break; 2873 case NIX_FLOW_KEY_TYPE_TCP: 2874 case NIX_FLOW_KEY_TYPE_UDP: 2875 case NIX_FLOW_KEY_TYPE_SCTP: 2876 case NIX_FLOW_KEY_TYPE_INNR_TCP: 2877 case NIX_FLOW_KEY_TYPE_INNR_UDP: 2878 case NIX_FLOW_KEY_TYPE_INNR_SCTP: 2879 field->lid = NPC_LID_LD; 2880 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || 2881 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || 2882 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 2883 field->lid = NPC_LID_LH; 2884 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ 2885 2886 /* Enum values for NPC_LID_LD and NPC_LID_LG are same, 2887 * so no need to change the ltype_match, just change 2888 * the lid for inner protocols 2889 */ 2890 BUILD_BUG_ON((int)NPC_LT_LD_TCP != 2891 (int)NPC_LT_LH_TU_TCP); 2892 BUILD_BUG_ON((int)NPC_LT_LD_UDP != 2893 (int)NPC_LT_LH_TU_UDP); 2894 BUILD_BUG_ON((int)NPC_LT_LD_SCTP != 2895 (int)NPC_LT_LH_TU_SCTP); 2896 2897 if ((key_type == NIX_FLOW_KEY_TYPE_TCP || 2898 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && 2899 valid_key) { 2900 field->ltype_match |= NPC_LT_LD_TCP; 2901 group_member = true; 2902 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || 2903 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && 2904 valid_key) { 2905 field->ltype_match |= NPC_LT_LD_UDP; 2906 group_member = true; 2907 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || 2908 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && 2909 valid_key) { 2910 field->ltype_match |= NPC_LT_LD_SCTP; 2911 group_member = true; 2912 } 2913 field->ltype_mask = ~field->ltype_match; 2914 if (key_type == NIX_FLOW_KEY_TYPE_SCTP || 2915 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { 2916 /* Handle the case where any of the group item 2917 * is enabled in the group but not the final one 2918 */ 2919 if (group_member) { 2920 valid_key = true; 2921 group_member = false; 2922 } 2923 } else { 2924 field_marker = false; 2925 keyoff_marker = false; 2926 } 2927 2928 /* TCP/UDP/SCTP and ESP/AH falls at same offset so 2929 * remember the TCP key offset of 40 byte hash key. 2930 */ 2931 if (key_type == NIX_FLOW_KEY_TYPE_TCP) 2932 l4_key_offset = key_off; 2933 break; 2934 case NIX_FLOW_KEY_TYPE_NVGRE: 2935 field->lid = NPC_LID_LD; 2936 field->hdr_offset = 4; /* VSID offset */ 2937 field->bytesm1 = 2; 2938 field->ltype_match = NPC_LT_LD_NVGRE; 2939 field->ltype_mask = 0xF; 2940 break; 2941 case NIX_FLOW_KEY_TYPE_VXLAN: 2942 case NIX_FLOW_KEY_TYPE_GENEVE: 2943 field->lid = NPC_LID_LE; 2944 field->bytesm1 = 2; 2945 field->hdr_offset = 4; 2946 field->ltype_mask = 0xF; 2947 field_marker = false; 2948 keyoff_marker = false; 2949 2950 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { 2951 field->ltype_match |= NPC_LT_LE_VXLAN; 2952 group_member = true; 2953 } 2954 2955 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { 2956 field->ltype_match |= NPC_LT_LE_GENEVE; 2957 group_member = true; 2958 } 2959 2960 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { 2961 if (group_member) { 2962 field->ltype_mask = ~field->ltype_match; 2963 field_marker = true; 2964 keyoff_marker = true; 2965 valid_key = true; 2966 group_member = false; 2967 } 2968 } 2969 break; 2970 case NIX_FLOW_KEY_TYPE_ETH_DMAC: 2971 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: 2972 field->lid = NPC_LID_LA; 2973 field->ltype_match = NPC_LT_LA_ETHER; 2974 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { 2975 field->lid = NPC_LID_LF; 2976 field->ltype_match = NPC_LT_LF_TU_ETHER; 2977 } 2978 field->hdr_offset = 0; 2979 field->bytesm1 = 5; /* DMAC 6 Byte */ 2980 field->ltype_mask = 0xF; 2981 break; 2982 case NIX_FLOW_KEY_TYPE_IPV6_EXT: 2983 field->lid = NPC_LID_LC; 2984 field->hdr_offset = 40; /* IPV6 hdr */ 2985 field->bytesm1 = 0; /* 1 Byte ext hdr*/ 2986 field->ltype_match = NPC_LT_LC_IP6_EXT; 2987 field->ltype_mask = 0xF; 2988 break; 2989 case NIX_FLOW_KEY_TYPE_GTPU: 2990 field->lid = NPC_LID_LE; 2991 field->hdr_offset = 4; 2992 field->bytesm1 = 3; /* 4 bytes TID*/ 2993 field->ltype_match = NPC_LT_LE_GTPU; 2994 field->ltype_mask = 0xF; 2995 break; 2996 case NIX_FLOW_KEY_TYPE_VLAN: 2997 field->lid = NPC_LID_LB; 2998 field->hdr_offset = 2; /* Skip TPID (2-bytes) */ 2999 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ 3000 field->ltype_match = NPC_LT_LB_CTAG; 3001 field->ltype_mask = 0xF; 3002 field->fn_mask = 1; /* Mask out the first nibble */ 3003 break; 3004 case NIX_FLOW_KEY_TYPE_AH: 3005 case NIX_FLOW_KEY_TYPE_ESP: 3006 field->hdr_offset = 0; 3007 field->bytesm1 = 7; /* SPI + sequence number */ 3008 field->ltype_mask = 0xF; 3009 field->lid = NPC_LID_LE; 3010 field->ltype_match = NPC_LT_LE_ESP; 3011 if (key_type == NIX_FLOW_KEY_TYPE_AH) { 3012 field->lid = NPC_LID_LD; 3013 field->ltype_match = NPC_LT_LD_AH; 3014 field->hdr_offset = 4; 3015 keyoff_marker = false; 3016 } 3017 break; 3018 } 3019 field->ena = 1; 3020 3021 /* Found a valid flow key type */ 3022 if (valid_key) { 3023 /* Use the key offset of TCP/UDP/SCTP fields 3024 * for ESP/AH fields. 3025 */ 3026 if (key_type == NIX_FLOW_KEY_TYPE_ESP || 3027 key_type == NIX_FLOW_KEY_TYPE_AH) 3028 key_off = l4_key_offset; 3029 field->key_offset = key_off; 3030 memcpy(&alg[nr_field], field, sizeof(*field)); 3031 max_key_off = max(max_key_off, field->bytesm1 + 1); 3032 3033 /* Found a field marker, get the next field */ 3034 if (field_marker) 3035 nr_field++; 3036 } 3037 3038 /* Found a keyoff marker, update the new key_off */ 3039 if (keyoff_marker) { 3040 key_off += max_key_off; 3041 max_key_off = 0; 3042 } 3043 } 3044 /* Processed all the flow key types */ 3045 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) 3046 return 0; 3047 else 3048 return NIX_AF_ERR_RSS_NOSPC_FIELD; 3049 } 3050 3051 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) 3052 { 3053 u64 field[FIELDS_PER_ALG]; 3054 struct nix_hw *hw; 3055 int fid, rc; 3056 3057 hw = get_nix_hw(rvu->hw, blkaddr); 3058 if (!hw) 3059 return -EINVAL; 3060 3061 /* No room to add new flow hash algoritham */ 3062 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) 3063 return NIX_AF_ERR_RSS_NOSPC_ALGO; 3064 3065 /* Generate algo fields for the given flow_cfg */ 3066 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); 3067 if (rc) 3068 return rc; 3069 3070 /* Update ALGX_FIELDX register with generated fields */ 3071 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3072 rvu_write64(rvu, blkaddr, 3073 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, 3074 fid), field[fid]); 3075 3076 /* Store the flow_cfg for futher lookup */ 3077 rc = hw->flowkey.in_use; 3078 hw->flowkey.flowkey[rc] = flow_cfg; 3079 hw->flowkey.in_use++; 3080 3081 return rc; 3082 } 3083 3084 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, 3085 struct nix_rss_flowkey_cfg *req, 3086 struct nix_rss_flowkey_cfg_rsp *rsp) 3087 { 3088 u16 pcifunc = req->hdr.pcifunc; 3089 int alg_idx, nixlf, blkaddr; 3090 struct nix_hw *nix_hw; 3091 int err; 3092 3093 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3094 if (err) 3095 return err; 3096 3097 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3098 if (!nix_hw) 3099 return -EINVAL; 3100 3101 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); 3102 /* Failed to get algo index from the exiting list, reserve new */ 3103 if (alg_idx < 0) { 3104 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, 3105 req->flowkey_cfg); 3106 if (alg_idx < 0) 3107 return alg_idx; 3108 } 3109 rsp->alg_idx = alg_idx; 3110 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, 3111 alg_idx, req->mcam_index); 3112 return 0; 3113 } 3114 3115 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) 3116 { 3117 u32 flowkey_cfg, minkey_cfg; 3118 int alg, fid, rc; 3119 3120 /* Disable all flow key algx fieldx */ 3121 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { 3122 for (fid = 0; fid < FIELDS_PER_ALG; fid++) 3123 rvu_write64(rvu, blkaddr, 3124 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 3125 0); 3126 } 3127 3128 /* IPv4/IPv6 SIP/DIPs */ 3129 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; 3130 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3131 if (rc < 0) 3132 return rc; 3133 3134 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3135 minkey_cfg = flowkey_cfg; 3136 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; 3137 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3138 if (rc < 0) 3139 return rc; 3140 3141 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3142 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; 3143 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3144 if (rc < 0) 3145 return rc; 3146 3147 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ 3148 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; 3149 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3150 if (rc < 0) 3151 return rc; 3152 3153 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3154 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3155 NIX_FLOW_KEY_TYPE_UDP; 3156 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3157 if (rc < 0) 3158 return rc; 3159 3160 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3161 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3162 NIX_FLOW_KEY_TYPE_SCTP; 3163 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3164 if (rc < 0) 3165 return rc; 3166 3167 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3168 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | 3169 NIX_FLOW_KEY_TYPE_SCTP; 3170 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3171 if (rc < 0) 3172 return rc; 3173 3174 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ 3175 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | 3176 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; 3177 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); 3178 if (rc < 0) 3179 return rc; 3180 3181 return 0; 3182 } 3183 3184 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, 3185 struct nix_set_mac_addr *req, 3186 struct msg_rsp *rsp) 3187 { 3188 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; 3189 u16 pcifunc = req->hdr.pcifunc; 3190 int blkaddr, nixlf, err; 3191 struct rvu_pfvf *pfvf; 3192 3193 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); 3194 if (err) 3195 return err; 3196 3197 pfvf = rvu_get_pfvf(rvu, pcifunc); 3198 3199 /* untrusted VF can't overwrite admin(PF) changes */ 3200 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3201 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { 3202 dev_warn(rvu->dev, 3203 "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); 3204 return -EPERM; 3205 } 3206 3207 ether_addr_copy(pfvf->mac_addr, req->mac_addr); 3208 3209 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, 3210 pfvf->rx_chan_base, req->mac_addr); 3211 3212 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) 3213 ether_addr_copy(pfvf->default_mac, req->mac_addr); 3214 3215 rvu_switch_update_rules(rvu, pcifunc); 3216 3217 return 0; 3218 } 3219 3220 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, 3221 struct msg_req *req, 3222 struct nix_get_mac_addr_rsp *rsp) 3223 { 3224 u16 pcifunc = req->hdr.pcifunc; 3225 struct rvu_pfvf *pfvf; 3226 3227 if (!is_nixlf_attached(rvu, pcifunc)) 3228 return NIX_AF_ERR_AF_LF_INVALID; 3229 3230 pfvf = rvu_get_pfvf(rvu, pcifunc); 3231 3232 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); 3233 3234 return 0; 3235 } 3236 3237 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, 3238 struct msg_rsp *rsp) 3239 { 3240 bool allmulti, promisc, nix_rx_multicast; 3241 u16 pcifunc = req->hdr.pcifunc; 3242 struct rvu_pfvf *pfvf; 3243 int nixlf, err; 3244 3245 pfvf = rvu_get_pfvf(rvu, pcifunc); 3246 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; 3247 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; 3248 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false; 3249 3250 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; 3251 3252 if (is_vf(pcifunc) && !nix_rx_multicast && 3253 (promisc || allmulti)) { 3254 dev_warn_ratelimited(rvu->dev, 3255 "VF promisc/multicast not supported\n"); 3256 return 0; 3257 } 3258 3259 /* untrusted VF can't configure promisc/allmulti */ 3260 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && 3261 (promisc || allmulti)) 3262 return 0; 3263 3264 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3265 if (err) 3266 return err; 3267 3268 if (nix_rx_multicast) { 3269 /* add/del this PF_FUNC to/from mcast pkt replication list */ 3270 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, 3271 allmulti); 3272 if (err) { 3273 dev_err(rvu->dev, 3274 "Failed to update pcifunc 0x%x to multicast list\n", 3275 pcifunc); 3276 return err; 3277 } 3278 3279 /* add/del this PF_FUNC to/from promisc pkt replication list */ 3280 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, 3281 promisc); 3282 if (err) { 3283 dev_err(rvu->dev, 3284 "Failed to update pcifunc 0x%x to promisc list\n", 3285 pcifunc); 3286 return err; 3287 } 3288 } 3289 3290 /* install/uninstall allmulti entry */ 3291 if (allmulti) { 3292 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, 3293 pfvf->rx_chan_base); 3294 } else { 3295 if (!nix_rx_multicast) 3296 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); 3297 } 3298 3299 /* install/uninstall promisc entry */ 3300 if (promisc) { 3301 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3302 pfvf->rx_chan_base, 3303 pfvf->rx_chan_cnt); 3304 } else { 3305 if (!nix_rx_multicast) 3306 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3307 } 3308 3309 return 0; 3310 } 3311 3312 static void nix_find_link_frs(struct rvu *rvu, 3313 struct nix_frs_cfg *req, u16 pcifunc) 3314 { 3315 int pf = rvu_get_pf(pcifunc); 3316 struct rvu_pfvf *pfvf; 3317 int maxlen, minlen; 3318 int numvfs, hwvf; 3319 int vf; 3320 3321 /* Update with requester's min/max lengths */ 3322 pfvf = rvu_get_pfvf(rvu, pcifunc); 3323 pfvf->maxlen = req->maxlen; 3324 if (req->update_minlen) 3325 pfvf->minlen = req->minlen; 3326 3327 maxlen = req->maxlen; 3328 minlen = req->update_minlen ? req->minlen : 0; 3329 3330 /* Get this PF's numVFs and starting hwvf */ 3331 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); 3332 3333 /* For each VF, compare requested max/minlen */ 3334 for (vf = 0; vf < numvfs; vf++) { 3335 pfvf = &rvu->hwvf[hwvf + vf]; 3336 if (pfvf->maxlen > maxlen) 3337 maxlen = pfvf->maxlen; 3338 if (req->update_minlen && 3339 pfvf->minlen && pfvf->minlen < minlen) 3340 minlen = pfvf->minlen; 3341 } 3342 3343 /* Compare requested max/minlen with PF's max/minlen */ 3344 pfvf = &rvu->pf[pf]; 3345 if (pfvf->maxlen > maxlen) 3346 maxlen = pfvf->maxlen; 3347 if (req->update_minlen && 3348 pfvf->minlen && pfvf->minlen < minlen) 3349 minlen = pfvf->minlen; 3350 3351 /* Update the request with max/min PF's and it's VF's max/min */ 3352 req->maxlen = maxlen; 3353 if (req->update_minlen) 3354 req->minlen = minlen; 3355 } 3356 3357 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, 3358 struct msg_rsp *rsp) 3359 { 3360 struct rvu_hwinfo *hw = rvu->hw; 3361 u16 pcifunc = req->hdr.pcifunc; 3362 int pf = rvu_get_pf(pcifunc); 3363 int blkaddr, schq, link = -1; 3364 struct nix_txsch *txsch; 3365 u64 cfg, lmac_fifo_len; 3366 struct nix_hw *nix_hw; 3367 u8 cgx = 0, lmac = 0; 3368 u16 max_mtu; 3369 3370 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3371 if (blkaddr < 0) 3372 return NIX_AF_ERR_AF_LF_INVALID; 3373 3374 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3375 if (!nix_hw) 3376 return -EINVAL; 3377 3378 if (is_afvf(pcifunc)) 3379 rvu_get_lbk_link_max_frs(rvu, &max_mtu); 3380 else 3381 rvu_get_lmac_link_max_frs(rvu, &max_mtu); 3382 3383 if (!req->sdp_link && req->maxlen > max_mtu) 3384 return NIX_AF_ERR_FRS_INVALID; 3385 3386 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) 3387 return NIX_AF_ERR_FRS_INVALID; 3388 3389 /* Check if requester wants to update SMQ's */ 3390 if (!req->update_smq) 3391 goto rx_frscfg; 3392 3393 /* Update min/maxlen in each of the SMQ attached to this PF/VF */ 3394 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; 3395 mutex_lock(&rvu->rsrc_lock); 3396 for (schq = 0; schq < txsch->schq.max; schq++) { 3397 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) 3398 continue; 3399 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); 3400 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); 3401 if (req->update_minlen) 3402 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); 3403 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); 3404 } 3405 mutex_unlock(&rvu->rsrc_lock); 3406 3407 rx_frscfg: 3408 /* Check if config is for SDP link */ 3409 if (req->sdp_link) { 3410 if (!hw->sdp_links) 3411 return NIX_AF_ERR_RX_LINK_INVALID; 3412 link = hw->cgx_links + hw->lbk_links; 3413 goto linkcfg; 3414 } 3415 3416 /* Check if the request is from CGX mapped RVU PF */ 3417 if (is_pf_cgxmapped(rvu, pf)) { 3418 /* Get CGX and LMAC to which this PF is mapped and find link */ 3419 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); 3420 link = (cgx * hw->lmac_per_cgx) + lmac; 3421 } else if (pf == 0) { 3422 /* For VFs of PF0 ingress is LBK port, so config LBK link */ 3423 link = hw->cgx_links; 3424 } 3425 3426 if (link < 0) 3427 return NIX_AF_ERR_RX_LINK_INVALID; 3428 3429 nix_find_link_frs(rvu, req, pcifunc); 3430 3431 linkcfg: 3432 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 3433 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 3434 if (req->update_minlen) 3435 cfg = (cfg & ~0xFFFFULL) | req->minlen; 3436 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); 3437 3438 if (req->sdp_link || pf == 0) 3439 return 0; 3440 3441 /* Update transmit credits for CGX links */ 3442 lmac_fifo_len = 3443 rvu_cgx_get_fifolen(rvu) / 3444 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3445 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3446 cfg &= ~(0xFFFFFULL << 12); 3447 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; 3448 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); 3449 return 0; 3450 } 3451 3452 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, 3453 struct msg_rsp *rsp) 3454 { 3455 int nixlf, blkaddr, err; 3456 u64 cfg; 3457 3458 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); 3459 if (err) 3460 return err; 3461 3462 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); 3463 /* Set the interface configuration */ 3464 if (req->len_verify & BIT(0)) 3465 cfg |= BIT_ULL(41); 3466 else 3467 cfg &= ~BIT_ULL(41); 3468 3469 if (req->len_verify & BIT(1)) 3470 cfg |= BIT_ULL(40); 3471 else 3472 cfg &= ~BIT_ULL(40); 3473 3474 if (req->csum_verify & BIT(0)) 3475 cfg |= BIT_ULL(37); 3476 else 3477 cfg &= ~BIT_ULL(37); 3478 3479 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); 3480 3481 return 0; 3482 } 3483 3484 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 3485 { 3486 /* CN10k supports 72KB FIFO size and max packet size of 64k */ 3487 if (rvu->hw->lbk_bufsize == 0x12000) 3488 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; 3489 3490 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 3491 } 3492 3493 static void nix_link_config(struct rvu *rvu, int blkaddr) 3494 { 3495 struct rvu_hwinfo *hw = rvu->hw; 3496 int cgx, lmac_cnt, slink, link; 3497 u16 lbk_max_frs, lmac_max_frs; 3498 u64 tx_credits; 3499 3500 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 3501 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 3502 3503 /* Set default min/max packet lengths allowed on NIX Rx links. 3504 * 3505 * With HW reset minlen value of 60byte, HW will treat ARP pkts 3506 * as undersize and report them to SW as error pkts, hence 3507 * setting it to 40 bytes. 3508 */ 3509 for (link = 0; link < hw->cgx_links; link++) { 3510 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3511 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 3512 } 3513 3514 for (link = hw->cgx_links; link < hw->lbk_links; link++) { 3515 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3516 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 3517 } 3518 if (hw->sdp_links) { 3519 link = hw->cgx_links + hw->lbk_links; 3520 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3521 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3522 } 3523 3524 /* Set credits for Tx links assuming max packet length allowed. 3525 * This will be reconfigured based on MTU set for PF/VF. 3526 */ 3527 for (cgx = 0; cgx < hw->cgx; cgx++) { 3528 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3529 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - 3530 lmac_max_frs) / 16; 3531 /* Enable credits and set credit pkt count to max allowed */ 3532 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3533 slink = cgx * hw->lmac_per_cgx; 3534 for (link = slink; link < (slink + lmac_cnt); link++) { 3535 rvu_write64(rvu, blkaddr, 3536 NIX_AF_TX_LINKX_NORM_CREDIT(link), 3537 tx_credits); 3538 } 3539 } 3540 3541 /* Set Tx credits for LBK link */ 3542 slink = hw->cgx_links; 3543 for (link = slink; link < (slink + hw->lbk_links); link++) { 3544 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 3545 /* Enable credits and set credit pkt count to max allowed */ 3546 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3547 rvu_write64(rvu, blkaddr, 3548 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); 3549 } 3550 } 3551 3552 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) 3553 { 3554 int idx, err; 3555 u64 status; 3556 3557 /* Start X2P bus calibration */ 3558 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3559 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); 3560 /* Wait for calibration to complete */ 3561 err = rvu_poll_reg(rvu, blkaddr, 3562 NIX_AF_STATUS, BIT_ULL(10), false); 3563 if (err) { 3564 dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); 3565 return err; 3566 } 3567 3568 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); 3569 /* Check if CGX devices are ready */ 3570 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { 3571 /* Skip when cgx port is not available */ 3572 if (!rvu_cgx_pdata(idx, rvu) || 3573 (status & (BIT_ULL(16 + idx)))) 3574 continue; 3575 dev_err(rvu->dev, 3576 "CGX%d didn't respond to NIX X2P calibration\n", idx); 3577 err = -EBUSY; 3578 } 3579 3580 /* Check if LBK is ready */ 3581 if (!(status & BIT_ULL(19))) { 3582 dev_err(rvu->dev, 3583 "LBK didn't respond to NIX X2P calibration\n"); 3584 err = -EBUSY; 3585 } 3586 3587 /* Clear 'calibrate_x2p' bit */ 3588 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3589 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); 3590 if (err || (status & 0x3FFULL)) 3591 dev_err(rvu->dev, 3592 "NIX X2P calibration failed, status 0x%llx\n", status); 3593 if (err) 3594 return err; 3595 return 0; 3596 } 3597 3598 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) 3599 { 3600 u64 cfg; 3601 int err; 3602 3603 /* Set admin queue endianness */ 3604 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); 3605 #ifdef __BIG_ENDIAN 3606 cfg |= BIT_ULL(8); 3607 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3608 #else 3609 cfg &= ~BIT_ULL(8); 3610 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); 3611 #endif 3612 3613 /* Do not bypass NDC cache */ 3614 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); 3615 cfg &= ~0x3FFEULL; 3616 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 3617 /* Disable caching of SQB aka SQEs */ 3618 cfg |= 0x04ULL; 3619 #endif 3620 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); 3621 3622 /* Result structure can be followed by RQ/SQ/CQ context at 3623 * RES + 128bytes and a write mask at RES + 256 bytes, depending on 3624 * operation type. Alloc sufficient result memory for all operations. 3625 */ 3626 err = rvu_aq_alloc(rvu, &block->aq, 3627 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), 3628 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); 3629 if (err) 3630 return err; 3631 3632 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); 3633 rvu_write64(rvu, block->addr, 3634 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); 3635 return 0; 3636 } 3637 3638 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) 3639 { 3640 const struct npc_lt_def_cfg *ltdefs; 3641 struct rvu_hwinfo *hw = rvu->hw; 3642 int blkaddr = nix_hw->blkaddr; 3643 struct rvu_block *block; 3644 int err; 3645 u64 cfg; 3646 3647 block = &hw->block[blkaddr]; 3648 3649 if (is_rvu_96xx_B0(rvu)) { 3650 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt 3651 * internal state when conditional clocks are turned off. 3652 * Hence enable them. 3653 */ 3654 rvu_write64(rvu, blkaddr, NIX_AF_CFG, 3655 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); 3656 3657 /* Set chan/link to backpressure TL3 instead of TL2 */ 3658 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); 3659 3660 /* Disable SQ manager's sticky mode operation (set TM6 = 0) 3661 * This sticky mode is known to cause SQ stalls when multiple 3662 * SQs are mapped to same SMQ and transmitting pkts at a time. 3663 */ 3664 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); 3665 cfg &= ~BIT_ULL(15); 3666 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); 3667 } 3668 3669 ltdefs = rvu->kpu.lt_def; 3670 /* Calibrate X2P bus to check if CGX/LBK links are fine */ 3671 err = nix_calibrate_x2p(rvu, blkaddr); 3672 if (err) 3673 return err; 3674 3675 /* Initialize admin queue */ 3676 err = nix_aq_init(rvu, block); 3677 if (err) 3678 return err; 3679 3680 /* Restore CINT timer delay to HW reset values */ 3681 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); 3682 3683 if (is_block_implemented(hw, blkaddr)) { 3684 err = nix_setup_txschq(rvu, nix_hw, blkaddr); 3685 if (err) 3686 return err; 3687 3688 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); 3689 if (err) 3690 return err; 3691 3692 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); 3693 if (err) 3694 return err; 3695 3696 err = nix_setup_mcast(rvu, nix_hw, blkaddr); 3697 if (err) 3698 return err; 3699 3700 err = nix_setup_txvlan(rvu, nix_hw); 3701 if (err) 3702 return err; 3703 3704 /* Configure segmentation offload formats */ 3705 nix_setup_lso(rvu, nix_hw, blkaddr); 3706 3707 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 3708 * This helps HW protocol checker to identify headers 3709 * and validate length and checksums. 3710 */ 3711 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, 3712 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | 3713 ltdefs->rx_ol2.ltype_mask); 3714 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, 3715 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | 3716 ltdefs->rx_oip4.ltype_mask); 3717 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, 3718 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | 3719 ltdefs->rx_iip4.ltype_mask); 3720 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, 3721 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | 3722 ltdefs->rx_oip6.ltype_mask); 3723 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, 3724 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | 3725 ltdefs->rx_iip6.ltype_mask); 3726 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, 3727 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | 3728 ltdefs->rx_otcp.ltype_mask); 3729 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, 3730 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | 3731 ltdefs->rx_itcp.ltype_mask); 3732 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, 3733 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | 3734 ltdefs->rx_oudp.ltype_mask); 3735 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, 3736 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | 3737 ltdefs->rx_iudp.ltype_mask); 3738 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, 3739 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | 3740 ltdefs->rx_osctp.ltype_mask); 3741 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, 3742 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | 3743 ltdefs->rx_isctp.ltype_mask); 3744 3745 if (!is_rvu_otx2(rvu)) { 3746 /* Enable APAD calculation for other protocols 3747 * matching APAD0 and APAD1 lt def registers. 3748 */ 3749 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, 3750 (ltdefs->rx_apad0.valid << 11) | 3751 (ltdefs->rx_apad0.lid << 8) | 3752 (ltdefs->rx_apad0.ltype_match << 4) | 3753 ltdefs->rx_apad0.ltype_mask); 3754 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, 3755 (ltdefs->rx_apad1.valid << 11) | 3756 (ltdefs->rx_apad1.lid << 8) | 3757 (ltdefs->rx_apad1.ltype_match << 4) | 3758 ltdefs->rx_apad1.ltype_mask); 3759 3760 /* Receive ethertype defination register defines layer 3761 * information in NPC_RESULT_S to identify the Ethertype 3762 * location in L2 header. Used for Ethertype overwriting 3763 * in inline IPsec flow. 3764 */ 3765 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), 3766 (ltdefs->rx_et[0].offset << 12) | 3767 (ltdefs->rx_et[0].valid << 11) | 3768 (ltdefs->rx_et[0].lid << 8) | 3769 (ltdefs->rx_et[0].ltype_match << 4) | 3770 ltdefs->rx_et[0].ltype_mask); 3771 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), 3772 (ltdefs->rx_et[1].offset << 12) | 3773 (ltdefs->rx_et[1].valid << 11) | 3774 (ltdefs->rx_et[1].lid << 8) | 3775 (ltdefs->rx_et[1].ltype_match << 4) | 3776 ltdefs->rx_et[1].ltype_mask); 3777 } 3778 3779 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); 3780 if (err) 3781 return err; 3782 3783 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ 3784 nix_link_config(rvu, blkaddr); 3785 3786 /* Enable Channel backpressure */ 3787 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); 3788 } 3789 return 0; 3790 } 3791 3792 int rvu_nix_init(struct rvu *rvu) 3793 { 3794 struct rvu_hwinfo *hw = rvu->hw; 3795 struct nix_hw *nix_hw; 3796 int blkaddr = 0, err; 3797 int i = 0; 3798 3799 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), 3800 GFP_KERNEL); 3801 if (!hw->nix) 3802 return -ENOMEM; 3803 3804 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3805 while (blkaddr) { 3806 nix_hw = &hw->nix[i]; 3807 nix_hw->rvu = rvu; 3808 nix_hw->blkaddr = blkaddr; 3809 err = rvu_nix_block_init(rvu, nix_hw); 3810 if (err) 3811 return err; 3812 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3813 i++; 3814 } 3815 3816 return 0; 3817 } 3818 3819 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, 3820 struct rvu_block *block) 3821 { 3822 struct nix_txsch *txsch; 3823 struct nix_mcast *mcast; 3824 struct nix_txvlan *vlan; 3825 struct nix_hw *nix_hw; 3826 int lvl; 3827 3828 rvu_aq_free(rvu, block->aq); 3829 3830 if (is_block_implemented(rvu->hw, blkaddr)) { 3831 nix_hw = get_nix_hw(rvu->hw, blkaddr); 3832 if (!nix_hw) 3833 return; 3834 3835 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 3836 txsch = &nix_hw->txsch[lvl]; 3837 kfree(txsch->schq.bmap); 3838 } 3839 3840 nix_ipolicer_freemem(nix_hw); 3841 3842 vlan = &nix_hw->txvlan; 3843 kfree(vlan->rsrc.bmap); 3844 mutex_destroy(&vlan->rsrc_lock); 3845 devm_kfree(rvu->dev, vlan->entry2pfvf_map); 3846 3847 mcast = &nix_hw->mcast; 3848 qmem_free(rvu->dev, mcast->mce_ctx); 3849 qmem_free(rvu->dev, mcast->mcast_buf); 3850 mutex_destroy(&mcast->mce_lock); 3851 } 3852 } 3853 3854 void rvu_nix_freemem(struct rvu *rvu) 3855 { 3856 struct rvu_hwinfo *hw = rvu->hw; 3857 struct rvu_block *block; 3858 int blkaddr = 0; 3859 3860 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3861 while (blkaddr) { 3862 block = &hw->block[blkaddr]; 3863 rvu_nix_block_freemem(rvu, blkaddr, block); 3864 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 3865 } 3866 } 3867 3868 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, 3869 struct msg_rsp *rsp) 3870 { 3871 u16 pcifunc = req->hdr.pcifunc; 3872 struct rvu_pfvf *pfvf; 3873 int nixlf, err; 3874 3875 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3876 if (err) 3877 return err; 3878 3879 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); 3880 3881 npc_mcam_enable_flows(rvu, pcifunc); 3882 3883 pfvf = rvu_get_pfvf(rvu, pcifunc); 3884 set_bit(NIXLF_INITIALIZED, &pfvf->flags); 3885 3886 rvu_switch_update_rules(rvu, pcifunc); 3887 3888 return rvu_cgx_start_stop_io(rvu, pcifunc, true); 3889 } 3890 3891 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, 3892 struct msg_rsp *rsp) 3893 { 3894 u16 pcifunc = req->hdr.pcifunc; 3895 struct rvu_pfvf *pfvf; 3896 int nixlf, err; 3897 3898 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); 3899 if (err) 3900 return err; 3901 3902 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 3903 3904 pfvf = rvu_get_pfvf(rvu, pcifunc); 3905 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 3906 3907 return rvu_cgx_start_stop_io(rvu, pcifunc, false); 3908 } 3909 3910 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) 3911 { 3912 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 3913 struct hwctx_disable_req ctx_req; 3914 int err; 3915 3916 ctx_req.hdr.pcifunc = pcifunc; 3917 3918 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ 3919 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); 3920 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); 3921 nix_interface_deinit(rvu, pcifunc, nixlf); 3922 nix_rx_sync(rvu, blkaddr); 3923 nix_txschq_free(rvu, pcifunc); 3924 3925 clear_bit(NIXLF_INITIALIZED, &pfvf->flags); 3926 3927 rvu_cgx_start_stop_io(rvu, pcifunc, false); 3928 3929 if (pfvf->sq_ctx) { 3930 ctx_req.ctype = NIX_AQ_CTYPE_SQ; 3931 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3932 if (err) 3933 dev_err(rvu->dev, "SQ ctx disable failed\n"); 3934 } 3935 3936 if (pfvf->rq_ctx) { 3937 ctx_req.ctype = NIX_AQ_CTYPE_RQ; 3938 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3939 if (err) 3940 dev_err(rvu->dev, "RQ ctx disable failed\n"); 3941 } 3942 3943 if (pfvf->cq_ctx) { 3944 ctx_req.ctype = NIX_AQ_CTYPE_CQ; 3945 err = nix_lf_hwctx_disable(rvu, &ctx_req); 3946 if (err) 3947 dev_err(rvu->dev, "CQ ctx disable failed\n"); 3948 } 3949 3950 nix_ctx_free(rvu, pfvf); 3951 3952 nix_free_all_bandprof(rvu, pcifunc); 3953 } 3954 3955 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) 3956 3957 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) 3958 { 3959 struct rvu_hwinfo *hw = rvu->hw; 3960 struct rvu_block *block; 3961 int blkaddr, pf; 3962 int nixlf; 3963 u64 cfg; 3964 3965 pf = rvu_get_pf(pcifunc); 3966 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 3967 return 0; 3968 3969 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3970 if (blkaddr < 0) 3971 return NIX_AF_ERR_AF_LF_INVALID; 3972 3973 block = &hw->block[blkaddr]; 3974 nixlf = rvu_get_lf(rvu, block, pcifunc, 0); 3975 if (nixlf < 0) 3976 return NIX_AF_ERR_AF_LF_INVALID; 3977 3978 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); 3979 3980 if (enable) 3981 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; 3982 else 3983 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; 3984 3985 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); 3986 3987 return 0; 3988 } 3989 3990 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, 3991 struct msg_rsp *rsp) 3992 { 3993 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); 3994 } 3995 3996 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, 3997 struct msg_rsp *rsp) 3998 { 3999 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); 4000 } 4001 4002 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, 4003 struct nix_lso_format_cfg *req, 4004 struct nix_lso_format_cfg_rsp *rsp) 4005 { 4006 u16 pcifunc = req->hdr.pcifunc; 4007 struct nix_hw *nix_hw; 4008 struct rvu_pfvf *pfvf; 4009 int blkaddr, idx, f; 4010 u64 reg; 4011 4012 pfvf = rvu_get_pfvf(rvu, pcifunc); 4013 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 4014 if (!pfvf->nixlf || blkaddr < 0) 4015 return NIX_AF_ERR_AF_LF_INVALID; 4016 4017 nix_hw = get_nix_hw(rvu->hw, blkaddr); 4018 if (!nix_hw) 4019 return -EINVAL; 4020 4021 /* Find existing matching LSO format, if any */ 4022 for (idx = 0; idx < nix_hw->lso.in_use; idx++) { 4023 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { 4024 reg = rvu_read64(rvu, blkaddr, 4025 NIX_AF_LSO_FORMATX_FIELDX(idx, f)); 4026 if (req->fields[f] != (reg & req->field_mask)) 4027 break; 4028 } 4029 4030 if (f == NIX_LSO_FIELD_MAX) 4031 break; 4032 } 4033 4034 if (idx < nix_hw->lso.in_use) { 4035 /* Match found */ 4036 rsp->lso_format_idx = idx; 4037 return 0; 4038 } 4039 4040 if (nix_hw->lso.in_use == nix_hw->lso.total) 4041 return NIX_AF_ERR_LSO_CFG_FAIL; 4042 4043 rsp->lso_format_idx = nix_hw->lso.in_use++; 4044 4045 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) 4046 rvu_write64(rvu, blkaddr, 4047 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), 4048 req->fields[f]); 4049 4050 return 0; 4051 } 4052 4053 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) 4054 { 4055 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); 4056 4057 /* overwrite vf mac address with default_mac */ 4058 if (from_vf) 4059 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); 4060 } 4061 4062 /* NIX ingress policers or bandwidth profiles APIs */ 4063 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) 4064 { 4065 struct npc_lt_def_cfg defs, *ltdefs; 4066 4067 ltdefs = &defs; 4068 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); 4069 4070 /* Extract PCP and DEI fields from outer VLAN from byte offset 4071 * 2 from the start of LB_PTR (ie TAG). 4072 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN 4073 * fields are considered when 'Tunnel enable' is set in profile. 4074 */ 4075 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, 4076 (2UL << 12) | (ltdefs->ovlan.lid << 8) | 4077 (ltdefs->ovlan.ltype_match << 4) | 4078 ltdefs->ovlan.ltype_mask); 4079 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, 4080 (2UL << 12) | (ltdefs->ivlan.lid << 8) | 4081 (ltdefs->ivlan.ltype_match << 4) | 4082 ltdefs->ivlan.ltype_mask); 4083 4084 /* DSCP field in outer and tunneled IPv4 packets */ 4085 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, 4086 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | 4087 (ltdefs->rx_oip4.ltype_match << 4) | 4088 ltdefs->rx_oip4.ltype_mask); 4089 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, 4090 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | 4091 (ltdefs->rx_iip4.ltype_match << 4) | 4092 ltdefs->rx_iip4.ltype_mask); 4093 4094 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ 4095 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, 4096 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | 4097 (ltdefs->rx_oip6.ltype_match << 4) | 4098 ltdefs->rx_oip6.ltype_mask); 4099 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, 4100 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | 4101 (ltdefs->rx_iip6.ltype_match << 4) | 4102 ltdefs->rx_iip6.ltype_mask); 4103 } 4104 4105 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, 4106 int layer, int prof_idx) 4107 { 4108 struct nix_cn10k_aq_enq_req aq_req; 4109 int rc; 4110 4111 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4112 4113 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); 4114 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 4115 aq_req.op = NIX_AQ_INSTOP_INIT; 4116 4117 /* Context is all zeros, submit to AQ */ 4118 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4119 (struct nix_aq_enq_req *)&aq_req, NULL); 4120 if (rc) 4121 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", 4122 layer, prof_idx); 4123 return rc; 4124 } 4125 4126 static int nix_setup_ipolicers(struct rvu *rvu, 4127 struct nix_hw *nix_hw, int blkaddr) 4128 { 4129 struct rvu_hwinfo *hw = rvu->hw; 4130 struct nix_ipolicer *ipolicer; 4131 int err, layer, prof_idx; 4132 u64 cfg; 4133 4134 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 4135 if (!(cfg & BIT_ULL(61))) { 4136 hw->cap.ipolicer = false; 4137 return 0; 4138 } 4139 4140 hw->cap.ipolicer = true; 4141 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, 4142 sizeof(*ipolicer), GFP_KERNEL); 4143 if (!nix_hw->ipolicer) 4144 return -ENOMEM; 4145 4146 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); 4147 4148 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4149 ipolicer = &nix_hw->ipolicer[layer]; 4150 switch (layer) { 4151 case BAND_PROF_LEAF_LAYER: 4152 ipolicer->band_prof.max = cfg & 0XFFFF; 4153 break; 4154 case BAND_PROF_MID_LAYER: 4155 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; 4156 break; 4157 case BAND_PROF_TOP_LAYER: 4158 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; 4159 break; 4160 } 4161 4162 if (!ipolicer->band_prof.max) 4163 continue; 4164 4165 err = rvu_alloc_bitmap(&ipolicer->band_prof); 4166 if (err) 4167 return err; 4168 4169 ipolicer->pfvf_map = devm_kcalloc(rvu->dev, 4170 ipolicer->band_prof.max, 4171 sizeof(u16), GFP_KERNEL); 4172 if (!ipolicer->pfvf_map) 4173 return -ENOMEM; 4174 4175 ipolicer->match_id = devm_kcalloc(rvu->dev, 4176 ipolicer->band_prof.max, 4177 sizeof(u16), GFP_KERNEL); 4178 if (!ipolicer->match_id) 4179 return -ENOMEM; 4180 4181 for (prof_idx = 0; 4182 prof_idx < ipolicer->band_prof.max; prof_idx++) { 4183 /* Set AF as current owner for INIT ops to succeed */ 4184 ipolicer->pfvf_map[prof_idx] = 0x00; 4185 4186 /* There is no enable bit in the profile context, 4187 * so no context disable. So let's INIT them here 4188 * so that PF/VF later on have to just do WRITE to 4189 * setup policer rates and config. 4190 */ 4191 err = nix_init_policer_context(rvu, nix_hw, 4192 layer, prof_idx); 4193 if (err) 4194 return err; 4195 } 4196 4197 /* Allocate memory for maintaining ref_counts for MID level 4198 * profiles, this will be needed for leaf layer profiles' 4199 * aggregation. 4200 */ 4201 if (layer != BAND_PROF_MID_LAYER) 4202 continue; 4203 4204 ipolicer->ref_count = devm_kcalloc(rvu->dev, 4205 ipolicer->band_prof.max, 4206 sizeof(u16), GFP_KERNEL); 4207 } 4208 4209 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ 4210 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); 4211 4212 nix_config_rx_pkt_policer_precolor(rvu, blkaddr); 4213 4214 return 0; 4215 } 4216 4217 static void nix_ipolicer_freemem(struct nix_hw *nix_hw) 4218 { 4219 struct nix_ipolicer *ipolicer; 4220 int layer; 4221 4222 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4223 ipolicer = &nix_hw->ipolicer[layer]; 4224 4225 if (!ipolicer->band_prof.max) 4226 continue; 4227 4228 kfree(ipolicer->band_prof.bmap); 4229 } 4230 } 4231 4232 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 4233 struct nix_hw *nix_hw, u16 pcifunc) 4234 { 4235 struct nix_ipolicer *ipolicer; 4236 int layer, hi_layer, prof_idx; 4237 4238 /* Bits [15:14] in profile index represent layer */ 4239 layer = (req->qidx >> 14) & 0x03; 4240 prof_idx = req->qidx & 0x3FFF; 4241 4242 ipolicer = &nix_hw->ipolicer[layer]; 4243 if (prof_idx >= ipolicer->band_prof.max) 4244 return -EINVAL; 4245 4246 /* Check if the profile is allocated to the requesting PCIFUNC or not 4247 * with the exception of AF. AF is allowed to read and update contexts. 4248 */ 4249 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) 4250 return -EINVAL; 4251 4252 /* If this profile is linked to higher layer profile then check 4253 * if that profile is also allocated to the requesting PCIFUNC 4254 * or not. 4255 */ 4256 if (!req->prof.hl_en) 4257 return 0; 4258 4259 /* Leaf layer profile can link only to mid layer and 4260 * mid layer to top layer. 4261 */ 4262 if (layer == BAND_PROF_LEAF_LAYER) 4263 hi_layer = BAND_PROF_MID_LAYER; 4264 else if (layer == BAND_PROF_MID_LAYER) 4265 hi_layer = BAND_PROF_TOP_LAYER; 4266 else 4267 return -EINVAL; 4268 4269 ipolicer = &nix_hw->ipolicer[hi_layer]; 4270 prof_idx = req->prof.band_prof_id; 4271 if (prof_idx >= ipolicer->band_prof.max || 4272 ipolicer->pfvf_map[prof_idx] != pcifunc) 4273 return -EINVAL; 4274 4275 return 0; 4276 } 4277 4278 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, 4279 struct nix_bandprof_alloc_req *req, 4280 struct nix_bandprof_alloc_rsp *rsp) 4281 { 4282 int blkaddr, layer, prof, idx, err; 4283 u16 pcifunc = req->hdr.pcifunc; 4284 struct nix_ipolicer *ipolicer; 4285 struct nix_hw *nix_hw; 4286 4287 if (!rvu->hw->cap.ipolicer) 4288 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4289 4290 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4291 if (err) 4292 return err; 4293 4294 mutex_lock(&rvu->rsrc_lock); 4295 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4296 if (layer == BAND_PROF_INVAL_LAYER) 4297 continue; 4298 if (!req->prof_count[layer]) 4299 continue; 4300 4301 ipolicer = &nix_hw->ipolicer[layer]; 4302 for (idx = 0; idx < req->prof_count[layer]; idx++) { 4303 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ 4304 if (idx == MAX_BANDPROF_PER_PFFUNC) 4305 break; 4306 4307 prof = rvu_alloc_rsrc(&ipolicer->band_prof); 4308 if (prof < 0) 4309 break; 4310 rsp->prof_count[layer]++; 4311 rsp->prof_idx[layer][idx] = prof; 4312 ipolicer->pfvf_map[prof] = pcifunc; 4313 } 4314 } 4315 mutex_unlock(&rvu->rsrc_lock); 4316 return 0; 4317 } 4318 4319 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) 4320 { 4321 int blkaddr, layer, prof_idx, err; 4322 struct nix_ipolicer *ipolicer; 4323 struct nix_hw *nix_hw; 4324 4325 if (!rvu->hw->cap.ipolicer) 4326 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4327 4328 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4329 if (err) 4330 return err; 4331 4332 mutex_lock(&rvu->rsrc_lock); 4333 /* Free all the profiles allocated to the PCIFUNC */ 4334 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4335 if (layer == BAND_PROF_INVAL_LAYER) 4336 continue; 4337 ipolicer = &nix_hw->ipolicer[layer]; 4338 4339 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { 4340 if (ipolicer->pfvf_map[prof_idx] != pcifunc) 4341 continue; 4342 4343 /* Clear ratelimit aggregation, if any */ 4344 if (layer == BAND_PROF_LEAF_LAYER && 4345 ipolicer->match_id[prof_idx]) 4346 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 4347 4348 ipolicer->pfvf_map[prof_idx] = 0x00; 4349 ipolicer->match_id[prof_idx] = 0; 4350 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 4351 } 4352 } 4353 mutex_unlock(&rvu->rsrc_lock); 4354 return 0; 4355 } 4356 4357 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, 4358 struct nix_bandprof_free_req *req, 4359 struct msg_rsp *rsp) 4360 { 4361 int blkaddr, layer, prof_idx, idx, err; 4362 u16 pcifunc = req->hdr.pcifunc; 4363 struct nix_ipolicer *ipolicer; 4364 struct nix_hw *nix_hw; 4365 4366 if (req->free_all) 4367 return nix_free_all_bandprof(rvu, pcifunc); 4368 4369 if (!rvu->hw->cap.ipolicer) 4370 return NIX_AF_ERR_IPOLICER_NOTSUPP; 4371 4372 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4373 if (err) 4374 return err; 4375 4376 mutex_lock(&rvu->rsrc_lock); 4377 /* Free the requested profile indices */ 4378 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { 4379 if (layer == BAND_PROF_INVAL_LAYER) 4380 continue; 4381 if (!req->prof_count[layer]) 4382 continue; 4383 4384 ipolicer = &nix_hw->ipolicer[layer]; 4385 for (idx = 0; idx < req->prof_count[layer]; idx++) { 4386 prof_idx = req->prof_idx[layer][idx]; 4387 if (prof_idx >= ipolicer->band_prof.max || 4388 ipolicer->pfvf_map[prof_idx] != pcifunc) 4389 continue; 4390 4391 /* Clear ratelimit aggregation, if any */ 4392 if (layer == BAND_PROF_LEAF_LAYER && 4393 ipolicer->match_id[prof_idx]) 4394 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); 4395 4396 ipolicer->pfvf_map[prof_idx] = 0x00; 4397 ipolicer->match_id[prof_idx] = 0; 4398 rvu_free_rsrc(&ipolicer->band_prof, prof_idx); 4399 if (idx == MAX_BANDPROF_PER_PFFUNC) 4400 break; 4401 } 4402 } 4403 mutex_unlock(&rvu->rsrc_lock); 4404 return 0; 4405 } 4406 4407 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, 4408 struct nix_cn10k_aq_enq_req *aq_req, 4409 struct nix_cn10k_aq_enq_rsp *aq_rsp, 4410 u16 pcifunc, u8 ctype, u32 qidx) 4411 { 4412 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4413 aq_req->hdr.pcifunc = pcifunc; 4414 aq_req->ctype = ctype; 4415 aq_req->op = NIX_AQ_INSTOP_READ; 4416 aq_req->qidx = qidx; 4417 4418 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4419 (struct nix_aq_enq_req *)aq_req, 4420 (struct nix_aq_enq_rsp *)aq_rsp); 4421 } 4422 4423 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, 4424 struct nix_hw *nix_hw, 4425 struct nix_cn10k_aq_enq_req *aq_req, 4426 struct nix_cn10k_aq_enq_rsp *aq_rsp, 4427 u32 leaf_prof, u16 mid_prof) 4428 { 4429 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4430 aq_req->hdr.pcifunc = 0x00; 4431 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; 4432 aq_req->op = NIX_AQ_INSTOP_WRITE; 4433 aq_req->qidx = leaf_prof; 4434 4435 aq_req->prof.band_prof_id = mid_prof; 4436 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 4437 aq_req->prof.hl_en = 1; 4438 aq_req->prof_mask.hl_en = 1; 4439 4440 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4441 (struct nix_aq_enq_req *)aq_req, 4442 (struct nix_aq_enq_rsp *)aq_rsp); 4443 } 4444 4445 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 4446 u16 rq_idx, u16 match_id) 4447 { 4448 int leaf_prof, mid_prof, leaf_match; 4449 struct nix_cn10k_aq_enq_req aq_req; 4450 struct nix_cn10k_aq_enq_rsp aq_rsp; 4451 struct nix_ipolicer *ipolicer; 4452 struct nix_hw *nix_hw; 4453 int blkaddr, idx, rc; 4454 4455 if (!rvu->hw->cap.ipolicer) 4456 return 0; 4457 4458 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); 4459 if (rc) 4460 return rc; 4461 4462 /* Fetch the RQ's context to see if policing is enabled */ 4463 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, 4464 NIX_AQ_CTYPE_RQ, rq_idx); 4465 if (rc) { 4466 dev_err(rvu->dev, 4467 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", 4468 __func__, rq_idx, pcifunc); 4469 return rc; 4470 } 4471 4472 if (!aq_rsp.rq.policer_ena) 4473 return 0; 4474 4475 /* Get the bandwidth profile ID mapped to this RQ */ 4476 leaf_prof = aq_rsp.rq.band_prof_id; 4477 4478 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 4479 ipolicer->match_id[leaf_prof] = match_id; 4480 4481 /* Check if any other leaf profile is marked with same match_id */ 4482 for (idx = 0; idx < ipolicer->band_prof.max; idx++) { 4483 if (idx == leaf_prof) 4484 continue; 4485 if (ipolicer->match_id[idx] != match_id) 4486 continue; 4487 4488 leaf_match = idx; 4489 break; 4490 } 4491 4492 if (idx == ipolicer->band_prof.max) 4493 return 0; 4494 4495 /* Fetch the matching profile's context to check if it's already 4496 * mapped to a mid level profile. 4497 */ 4498 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 4499 NIX_AQ_CTYPE_BANDPROF, leaf_match); 4500 if (rc) { 4501 dev_err(rvu->dev, 4502 "%s: Failed to fetch context of leaf profile %d\n", 4503 __func__, leaf_match); 4504 return rc; 4505 } 4506 4507 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 4508 if (aq_rsp.prof.hl_en) { 4509 /* Get Mid layer prof index and map leaf_prof index 4510 * also such that flows that are being steered 4511 * to different RQs and marked with same match_id 4512 * are rate limited in a aggregate fashion 4513 */ 4514 mid_prof = aq_rsp.prof.band_prof_id; 4515 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 4516 &aq_req, &aq_rsp, 4517 leaf_prof, mid_prof); 4518 if (rc) { 4519 dev_err(rvu->dev, 4520 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 4521 __func__, leaf_prof, mid_prof); 4522 goto exit; 4523 } 4524 4525 mutex_lock(&rvu->rsrc_lock); 4526 ipolicer->ref_count[mid_prof]++; 4527 mutex_unlock(&rvu->rsrc_lock); 4528 goto exit; 4529 } 4530 4531 /* Allocate a mid layer profile and 4532 * map both 'leaf_prof' and 'leaf_match' profiles to it. 4533 */ 4534 mutex_lock(&rvu->rsrc_lock); 4535 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); 4536 if (mid_prof < 0) { 4537 dev_err(rvu->dev, 4538 "%s: Unable to allocate mid layer profile\n", __func__); 4539 mutex_unlock(&rvu->rsrc_lock); 4540 goto exit; 4541 } 4542 mutex_unlock(&rvu->rsrc_lock); 4543 ipolicer->pfvf_map[mid_prof] = 0x00; 4544 ipolicer->ref_count[mid_prof] = 0; 4545 4546 /* Initialize mid layer profile same as 'leaf_prof' */ 4547 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 4548 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 4549 if (rc) { 4550 dev_err(rvu->dev, 4551 "%s: Failed to fetch context of leaf profile %d\n", 4552 __func__, leaf_prof); 4553 goto exit; 4554 } 4555 4556 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); 4557 aq_req.hdr.pcifunc = 0x00; 4558 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); 4559 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; 4560 aq_req.op = NIX_AQ_INSTOP_WRITE; 4561 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); 4562 /* Clear higher layer enable bit in the mid profile, just in case */ 4563 aq_req.prof.hl_en = 0; 4564 aq_req.prof_mask.hl_en = 1; 4565 4566 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, 4567 (struct nix_aq_enq_req *)&aq_req, NULL); 4568 if (rc) { 4569 dev_err(rvu->dev, 4570 "%s: Failed to INIT context of mid layer profile %d\n", 4571 __func__, mid_prof); 4572 goto exit; 4573 } 4574 4575 /* Map both leaf profiles to this mid layer profile */ 4576 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 4577 &aq_req, &aq_rsp, 4578 leaf_prof, mid_prof); 4579 if (rc) { 4580 dev_err(rvu->dev, 4581 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 4582 __func__, leaf_prof, mid_prof); 4583 goto exit; 4584 } 4585 4586 mutex_lock(&rvu->rsrc_lock); 4587 ipolicer->ref_count[mid_prof]++; 4588 mutex_unlock(&rvu->rsrc_lock); 4589 4590 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 4591 &aq_req, &aq_rsp, 4592 leaf_match, mid_prof); 4593 if (rc) { 4594 dev_err(rvu->dev, 4595 "%s: Failed to map leaf(%d) and mid(%d) profiles\n", 4596 __func__, leaf_match, mid_prof); 4597 ipolicer->ref_count[mid_prof]--; 4598 goto exit; 4599 } 4600 4601 mutex_lock(&rvu->rsrc_lock); 4602 ipolicer->ref_count[mid_prof]++; 4603 mutex_unlock(&rvu->rsrc_lock); 4604 4605 exit: 4606 return rc; 4607 } 4608 4609 /* Called with mutex rsrc_lock */ 4610 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, 4611 u32 leaf_prof) 4612 { 4613 struct nix_cn10k_aq_enq_req aq_req; 4614 struct nix_cn10k_aq_enq_rsp aq_rsp; 4615 struct nix_ipolicer *ipolicer; 4616 u16 mid_prof; 4617 int rc; 4618 4619 mutex_unlock(&rvu->rsrc_lock); 4620 4621 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, 4622 NIX_AQ_CTYPE_BANDPROF, leaf_prof); 4623 4624 mutex_lock(&rvu->rsrc_lock); 4625 if (rc) { 4626 dev_err(rvu->dev, 4627 "%s: Failed to fetch context of leaf profile %d\n", 4628 __func__, leaf_prof); 4629 return; 4630 } 4631 4632 if (!aq_rsp.prof.hl_en) 4633 return; 4634 4635 mid_prof = aq_rsp.prof.band_prof_id; 4636 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 4637 ipolicer->ref_count[mid_prof]--; 4638 /* If ref_count is zero, free mid layer profile */ 4639 if (!ipolicer->ref_count[mid_prof]) { 4640 ipolicer->pfvf_map[mid_prof] = 0x00; 4641 rvu_free_rsrc(&ipolicer->band_prof, mid_prof); 4642 } 4643 } 4644