/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>

#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include <rvu_trace.h>

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF	0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF	0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF	0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF	0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM	2
#define PCI_MBOX_BAR_NUM	4

#define NAME_SIZE	32

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START	0x00
#define NIX_LF_CINT_VEC_START	0x40
#define NIX_LF_GINT_VEC		0x80
#define NIX_LF_ERR_VEC		0x81
#define NIX_LF_POISON_VEC	0x82

/* RSS configuration */
struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};
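/* Usage sketch (editor's illustration, not a driver function): the error
 * level/code parsed from an RX CQE could be folded into the counters above
 * along these lines, assuming 'errlvl' and 'errcode' were already extracted
 * from the CQE parse result:
 *
 *	if (errlvl == NPC_ERRLVL_RE && errcode == ERRCODE_FCS)
 *		atomic_inc(&pfvf->hw.drv_stats.rx_fcs_errs);
 *	else if (errlvl == NPC_ERRLVL_RE && errcode == ERRCODE_UNDERSIZE)
 *		atomic_inc(&pfvf->hw.drv_stats.rx_undersize_errs);
 *	else if (errlvl == NPC_ERRLVL_NIX && errcode == ERRCODE_OL4_CSUM)
 *		atomic_inc(&pfvf->hw.drv_stats.rx_csum_errs);
 *	else
 *		atomic_inc(&pfvf->hw.drv_stats.rx_other_errs);
 */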
struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u16		txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* Segmentation */
	u8			lso_tsov4_idx;
	u8			lso_tsov6_idx;
	u8			lso_udpv4_idx;
	u8			lso_udpv6_idx;
	u8			hw_tso;

	/* MSI-X */
	u8			cint_cnt;    /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* true when the interface is down or not yet configured */
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	void			*iommu_domain;
	u16			max_frs;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED	BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED	BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN		BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED	BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED	BIT_ULL(10)
	u64			flags;

	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;
	struct workqueue_struct	*otx2_wq;
	struct work_struct	rx_mode_work;

	/* Ethtool stuff */
	u32			msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;

	struct otx2_ptp		*ptp;
	struct hwtstamp_config	tstamp;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
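/* Usage sketch (editor's illustration): the OTX2_HW_TIMESTAMP_LEN byte raw
 * counter value a timestamping-enabled RX path would pull off a frame can be
 * converted through the timecounter in struct otx2_ptp; 'tsns' is assumed to
 * hold that raw value:
 *
 *	u64 ns = timecounter_cyc2time(&pfvf->ptp->time_counter, tsns);
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
 */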
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	hw->hw_tso = true;

	if (is_96xx_A0(pfvf->pdev)) {
		hw->hw_tso = false;

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue these silicons require a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
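/* Usage sketch (editor's illustration): because otx2_get_regaddr() patches the
 * block address into the offset, callers pass block-local register offsets
 * from otx2_reg.h directly, e.g. to set a completion interrupt's enable bit
 * (register name assumed from otx2_reg.h):
 *
 *	otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
 */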
/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu  generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

static inline u64 otx2_lmt_flush(uint64_t addr)
{
	u64 result = 0;

	__asm__ volatile(".cpu  generic+lse\n"
			 "ldeor xzr,%x[rf],[%[rs]]"
			 : [rf]"=r"(result)
			 : [rs]"r"(addr));
	return result;
}

#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#define otx2_lmt_flush(addr)		({ 0; })
#endif

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
					   NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
				     int aura, s64 buf)
{
	otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
		      otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
#undef M
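/* Usage sketch (editor's illustration): a typical synchronous request built
 * with one of the otx2_mbox_alloc_msg_*() helpers generated above; <name> is
 * a placeholder for any message from MBOX_MESSAGES, and mbox.lock serializes
 * the exchange:
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_<name>(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	... fill request fields ...
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */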
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define	RVU_PFVF_PF_SHIFT	10
#define	RVU_PFVF_PF_MASK	0x3F
#define	RVU_PFVF_FUNC_SHIFT	0
#define	RVU_PFVF_FUNC_MASK	0x3FF

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
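/* Worked example (editor's note): pcifunc 0x0401 decodes as
 * PF = (0x0401 >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK = 1 and
 * FUNC = 0x0401 & RVU_PFVF_FUNC_MASK = 1, i.e. PF1/VF0, since a non-zero
 * FUNC field denotes VF index + 1.
 */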
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
#endif /* OTX2_COMMON_H */