// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
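
/* Per-queue octet/packet counters are read through the NIX LF "OP" registers:
 * the queue index is encoded in bits <63:32> of the value passed to
 * otx2_atomic64_add() and the atomic read-back returns that queue's counter.
 */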
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];

	if (!pfvf->qset.rq)
		return 0;

	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];

	if (!pfvf->qset.sq)
		return 0;

	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}

void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}

void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	return err;
}

static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		otx2_mbox_unlock(&pfvf->mbox);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
	otx2_mbox_unlock(&pfvf->mbox);

	return 0;
}

int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	else
		return -EPERM;

	return 0;
}

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	/* SMQ config limits maximum pkt size that can be transmitted */
	req->update_smq = true;
	pfvf->max_frs = mtu + OTX2_ETH_HLEN;
	req->maxlen = pfvf->max_frs;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	return err;
}

int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	return err;
}
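
/* Program the RSS indirection table, one NIX AQ INIT op per entry.
 * Requests are batched in the shared mailbox region and flushed whenever
 * it fills up, with a final flush once all entries have been queued.
 */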
int otx2_set_rss_table(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct mbox *mbox = &pfvf->mbox;
	struct nix_aq_enq_req *aq;
	int idx, err;

	otx2_mbox_lock(mbox);
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				otx2_mbox_unlock(mbox);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				otx2_mbox_unlock(mbox);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	otx2_mbox_unlock(mbox);
	return err;
}

void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* 352bit or 44byte key needs to be configured as below
	 * NIX_LF_RX_SECRETX0 = key<351:288>
	 * NIX_LF_RX_SECRETX1 = key<287:224>
	 * NIX_LF_RX_SECRETX2 = key<223:160>
	 * NIX_LF_RX_SECRETX3 = key<159:96>
	 * NIX_LF_RX_SECRETX4 = key<95:32>
	 * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}

int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	int idx, ret = 0;

	rss->rss_size = sizeof(rss->ind_tbl);

	/* Init RSS key if it is not setup already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Default indirection table */
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}

void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * set 1 less than cq_ecount_wait. And cq_time_wait is in
	 * usecs, convert that to 100ns count.
	 */
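	/* As programmed below, the time wait (converted from usecs to 100ns
	 * units) goes into bits <63:48>, the CQE-count wait into the bits
	 * starting at 32, and (cq_ecount_wait - 1) into the low-order bits.
	 */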
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}

dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp)
{
	dma_addr_t iova;

	/* Check if request can be accommodated in previously allocated page */
	if (pool->page && ((pool->page_offset + pool->rbsize) <=
	    (PAGE_SIZE << pool->rbpage_order))) {
		pool->pageref++;
		goto ret;
	}

	otx2_get_page(pool);

	/* Allocate a new page */
	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				 pool->rbpage_order);
	if (unlikely(!pool->page))
		return -ENOMEM;

	pool->page_offset = 0;
ret:
	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
				      pool->rbsize, DMA_FROM_DEVICE);
	if (!iova) {
		if (!pool->page_offset)
			__free_pages(pool->page, pool->rbpage_order);
		pool->page = NULL;
		return -ENOMEM;
	}
	pool->page_offset += pool->rbsize;
	return iova;
}

void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}

void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC, generate a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}

static int otx2_get_link(struct otx2_nic *pfvf)
{
	int link = 0;
	u16 map;

	/* cgx lmac link */
	if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
		map = pfvf->hw.tx_chan_base & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	}
	/* LBK channel */
	if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
		link = 12;

	return link;
}
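
/* Configure the single transmit scheduler queue allocated at each level,
 * forming one default chain SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1 -> link:
 * each level points at its parent and gets a default DWRR quantum, while
 * the SMQ additionally carries the min/max frame sizes.
 */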
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq = hw->txschq_list[lvl][0];
	/* Set topology e.t.c configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
				 OTX2_MIN_MTU;

		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
		/* MDQ config */
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;

		req->num_regs++;
		req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							otx2_get_link(pfvf));
		/* Enable this queue and backpressure */
		req->regval[2] = BIT_ULL(13) | BIT_ULL(12);

	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* Set DWRR quantum */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	int lvl;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txschq_stop(struct otx2_nic *pfvf)
{
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	otx2_mbox_lock(&pfvf->mbox);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
	return err;
}

void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (1) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: For RQ length 1K, for pass/drop level 204/230.
 * RED accepts pkts if free pointers > 102 & <= 205.
 * Drops pkts if free pointers < 102.
 */
#define RQ_PASS_LVL_AURA	(255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA	(255 - ((99 * 256) / 100)) /* Drop when 99% is full */
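
/* Worked example for the CQ levels above (illustrative numbers): with a
 * skid of 256 packets and a 1K-entry CQ, RQ_PASS_LVL_CQ = ((256 + 16) * 256)
 * / 1024 = 68 and RQ_DROP_LVL_CQ = (256 * 256) / 1024 = 64, both on the
 * 0-255 emptiness scale described above.
 */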

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
			 TSO_HEADER_SIZE);
	if (err)
		return err;

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	sq->head = 0;
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQ's to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = DFLT_RR_QTM;
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to the pipelining impact, a minimum of 2000 unused SQ CQEs
	 * must be maintained to avoid CQ overflow.
	 */
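	/* E.g. with a 4K-entry CQ this evaluates to (2000 * 256) / 4096 = 125,
	 * i.e. the 2000-CQE skid above expressed on the hardware's 0-255
	 * scale (the CQ size here is only illustrative).
	 */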
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;
	int err, pool_id;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
	} else {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* If all RQ auras point to a single pool, then all CQs'
	 * receive buffer pools also point to that same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
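
/* Deferred receive-buffer refill work, scheduled (via refill_task_sched and
 * pool_refill_work) when inline replenishment of cq->pool_ptrs buffers falls
 * behind. Buffers are allocated with GFP_KERNEL and returned to the aura; if
 * an allocation fails before at least half of the deficit is covered, the
 * work reschedules itself after 100 ms.
 */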
static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct otx2_pool *rbpool;
	struct refill_work *wrk;
	int qidx, free_ptrs = 0;
	struct otx2_nic *pfvf;
	s64 bufptr;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];
	rbpool = cq->rbpool;
	free_ptrs = cq->pool_ptrs;

	while (cq->pool_ptrs) {
		bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
		if (bufptr <= 0) {
			/* Schedule the work queue again if we failed to free
			 * at least half of the pointers, else enable NAPI for
			 * this RQ.
			 */
			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
				struct delayed_work *dwork;

				dwork = &wrk->pool_refill_work;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			} else {
				cq->refill_task_sched = false;
			}
			return;
		}
		otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	cq->refill_task_sched = false;
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}
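
/* Allocate a NIX LF via the AF mailbox: request the RQ/SQ/CQ counts, RSS
 * sizing and RX parse config. The response (consumed in
 * mbox_handler_nix_lf_alloc()) provides sqb_size, the RX/TX channel bases
 * and the LSO format indices.
 */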
int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = pfvf->hw.tx_queues;
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
	nixlf->xqe_sz = NIX_XQESZ_W16;
	/* We don't know the absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->qints < 1)
		return -ENXIO;

	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	u64 iova, pa;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
}
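
/* Queue an NPA AQ INIT request for an aura context. The request is only
 * queued here; callers flush the mailbox afterwards (see the "Flush
 * accumulated messages" step in the aura/pool init routines below). The
 * aura's free-pointer count is written back by hardware to pool->fc_addr
 * for flow control.
 */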
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
			  int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
			  int stack_pages, int numptrs, int buf_size)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;
	pool->rbpage_order = get_order(buf_size);

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err, ptr;
	s64 bufptr;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
	 * Last SQE is used for pointing to next SQB.
	 */
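	/* E.g. with a 4K SQB: (4096 / 128) - 1 = 31 SQEs per SQB, and for
	 * qset->sqe_cnt = 4096 that means (4096 + 31) / 31 = 133 SQBs
	 * (example sizes only; the real values come from hw->sqb_size and
	 * qset->sqe_cnt).
	 */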
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get no of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
		if (!sq->sqb_ptrs)
			return -ENOMEM;

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}
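
/* Set up the receive-buffer auras and pools, then pre-fill every pool and
 * donate the buffers to its aura with OTX2_HEAD_ROOM reserved in front of
 * each buffer.
 */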
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	s64 bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id,
					  bufptr + OTX2_HEAD_ROOM);
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct npa_lf_alloc_req *npalf;
	struct otx2_hw *hw = &pfvf->hw;
	int aura_cnt;

	/* Pool - Stack of free buffer pointers
	 * Aura - Alloc/frees pointers from/to pool for NIX DMA.
	 */

	if (!hw->pool_cnt)
		return -EINVAL;

	qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
				  hw->pool_cnt, GFP_KERNEL);
	if (!qset->pool)
		return -ENOMEM;

	/* Get memory to put this msg */
	npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
	if (!npalf)
		return -ENOMEM;

	/* Set aura and pool counts */
	npalf->nr_pools = hw->pool_cnt;
	aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
	npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	otx2_mbox_lock(mbox);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		otx2_mbox_unlock(mbox);
		return -ENOMEM;
	}

	/* detach all */
	detach->partial = false;

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	otx2_mbox_unlock(mbox);
	return 0;
}
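
/* Attach NPA and NIX LFs to this PF/VF via the AF, detect which NIX block
 * (NIX0/NIX1) the LF was taken from, and fetch the NPA/NIX MSI-X vector
 * offsets.
 */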
int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
	struct rsrc_attach *attach;
	struct msg_req *msix;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	attach->npalf = true;
	attach->nixlf = true;

	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then LF may be
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		otx2_mbox_unlock(&pfvf->mbox);
		return err;
	}
	otx2_mbox_unlock(&pfvf->mbox);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
		dev_err(pfvf->dev,
			"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
		return -EINVAL;
	}

	return 0;
}

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	otx2_mbox_lock(mbox);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		otx2_mbox_unlock(mbox);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	otx2_mbox_unlock(mbox);
}

/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp)
{
	int id;

	for (id = 0; id < CGX_RX_STATS_COUNT; id++)
		pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
	for (id = 0; id < CGX_TX_STATS_COUNT; id++)
		pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}

void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp)
{
	int lvl, schq;

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
}

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
{
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
{
	pfvf->hw.sqb_size = rsp->sqb_size;
	pfvf->hw.rx_chan_base = rsp->rx_chan_base;
	pfvf->hw.tx_chan_base = rsp->tx_chan_base;
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
{
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}

void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	int irq, qidx;

	for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	     qidx < n;
	     qidx++, irq++) {
		int vector = pci_irq_vector(pfvf->pdev, irq);

		irq_set_affinity_hint(vector, NULL);
		free_cpumask_var(hw->affinity_mask[irq]);
		free_irq(vector, &qset->napi[qidx]);
	}
}
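
/* Spread the CQ interrupt vectors across online CPUs round-robin and set
 * the affinity hints accordingly.
 */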
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int vec, cpu, irq, cint;

	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	cpu = cpumask_first(cpu_online_mask);

	/* CQ interrupts */
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
		if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
			return;

		cpumask_set_cpu(cpu, hw->affinity_mask[vec]);

		irq = pci_irq_vector(pfvf->pdev, vec);
		irq_set_affinity_hint(irq, hw->affinity_mask[vec]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (unlikely(cpu >= nr_cpu_ids))
			cpu = 0;
	}
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int __weak								\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp)		\
{									\
	/* Nothing to do here */					\
	return 0;							\
}									\
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
#undef M