/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4

#define NAME_SIZE 32

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START 0x00
#define NIX_LF_CINT_VEC_START 0x40
#define NIX_LF_GINT_VEC 0x80
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST = 0x0,
	TX_BCAST = 0x1,
	TX_MCAST = 0x2,
	TX_DROP = 0x3,
	TX_OCTS = 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_DROP = 0x4,
	RX_DROP_OCTS = 0x5,
	RX_FCS = 0x6,
	RX_ERR = 0x7,
	RX_DRP_BCAST = 0x8,
	RX_DRP_MCAST = 0x9,
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
	RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};

struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;
	u16 rx_queues;
	u16 tx_queues;
	u16 xdp_queues;
	u16 tot_tx_queues;
	u16 max_queues;
	u16 pool_cnt;
	u16 rqpool_cnt;
	u16 sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN 2048
	u16 rbuf_len;
	u32 xqe_size;

	/* NPA */
	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */
	u16 sqb_size;

	/* NIX */
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;
	u32 dwrr_mtu;

	/* HW settings, coalescing etc */
	u16 rx_chan_base;
	u16 tx_chan_base;
	u16 cq_qcount_wait;
	u16 cq_ecount_wait;
	u16 rq_skid;
	u8 cq_time_wait;

	/* Segmentation */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 lso_udpv4_idx;
	u8 lso_udpv6_idx;

	/* RSS */
	u8 flowkey_alg_idx;

	/* MSI-X */
	u8 cint_cnt; /* CQ interrupt count */
	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */
	char *irq_name;
	cpumask_var_t *affinity_mask;

	/* Stats */
	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links;  /* No. of CGX links present in HW */
	u8 lbk_links;  /* No. of LBK links present in HW */
	u8 tx_link;  /* Transmit channel link number */
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
	unsigned long cap_flag;

#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
	u64 *lmt_base;
	struct otx2_lmt_info __percpu *lmt_info;
};

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface was either configured or not */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
};

#define OTX2_HW_TIMESTAMP_LEN 8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
			 OTX2_MAX_UNICAST_FLOWS + \
			 OTX2_MAX_VLAN_FLOWS)
	u16 unicast_offset;
	u16 rx_vlan_offset;
	u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
	u16 max_flows;
	u8 dmacflt_max_flows;
	u8 *bmap_to_dmacindex;
	unsigned long dmacflt_bmap;
	struct list_head flow_list;
};

struct otx2_tc_info {
	/* hash table to store TC offloaded flows */
	struct rhashtable flow_table;
	struct rhashtable_params flow_ht_params;
	unsigned long *tc_entries_bitmap;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;
	void *iommu_domain;
	u16 tx_max_pktlen;
	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
	u64 flags;
	u64 *cq_op_addr;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	/* Mbox */
	struct mbox mbox;
	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8 total_vfs;
	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;
	struct otx2_tc_info tc_info;

	u64 reset_count;
	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Ethtool stuff */
	u32 msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;
	u16 tot_lmt_lines;
	u16 npa_lmt_lines;
	u32 nix_lmt_size;

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	unsigned long rq_bmap;

	/* Devlink */
	struct otx2_devlink *dl;
#ifdef CONFIG_DCB
	/* PFC */
	u8 pfc_en;
	u8 *queue_to_pfc_map;
#endif
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
#define PCI_REVISION_ID_95XXN 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
#define PCI_REVISION_ID_95XXO 0xE0
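
/* Illustrative decode of the REVID layout above (example value only, not
 * tied to any particular board): a revision of 0x21 breaks down as
 *	midr id    = 0x21 & 0xF0       = 0x20 -> PCI_REVISION_ID_95XXN
 *	major pass = (0x21 >> 2) & 0x3 = 0
 *	minor pass = 0x21 & 0x3        = 1
 * is_dev_otx2() below keys off only the midr id nibble.
 */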
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, older silicon requires a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
	}
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit IO memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}
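
/* Worked example for the LMTST sizing above (illustrative numbers only):
 * with num_ptrs = 5, i.e. the aura/count word in ptrs[0] plus four buffer
 * pointers, num_ptrs is odd so count_eot = 0 and the upper 64 bits of the
 * last 128-bit word carry no valid pointer. size = (8 * 5) / 16 = 2, then
 * incremented to 3 to cover that partial last word, so tar_addr[6:4] is set
 * to size - 1 = 2 and three 128-bit LMT words are flushed.
 */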

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	/* Free only one buffer at a time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
					   NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
#undef M
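
/* Typical usage of the otx2_mbox_alloc_msg_*() helpers generated above
 * (illustrative sketch only; actual message names and request types come
 * from MBOX_MESSAGES in mbox.h, nix_lf_alloc is used here just as an
 * example):
 *
 *	struct nix_lf_alloc_req *req;
 *	int err;
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	... fill request fields ...
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 *
 * otx2_sync_mbox_msg() sends the request to the AF and waits for the
 * response; use otx2_sync_mbox_msg_busy_poll() instead when sleeping is
 * not allowed.
 */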

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)

#define RVU_PFVF_PF_SHIFT 10
#define RVU_PFVF_PF_MASK 0x3F
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
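
/* Illustrative pcifunc decode (example value only): with the shifts and
 * masks above, pcifunc = 0x0C01 gives
 *	PF   = (0x0C01 >> 10) & 0x3F = 3
 *	FUNC = 0x0C01 & 0x3FF        = 1
 * A non-zero FUNC denotes a VF, so is_otx2_vf(0x0C01) is true; the PF
 * itself carries FUNC = 0.
 */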

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		      dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
#endif
#endif /* OTX2_COMMON_H */